diff --git a/Jenkinsfile b/Jenkinsfile
index 9544343bec6ef964fb15cf94c7a1a7c93d98810f..ebac32cb241af1a35556262690544f84ca94d9fc 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -104,6 +104,10 @@ pipeline {
            find pytest -name '*'sql|xargs rm -rf
            ./test-all.sh p2
            date'''
+           sh '''
+           cd ${WKC}/tests
+           ./test-all.sh b4fq
+           '''
         }
       }
       stage('test_b1') {
diff --git a/cmake/version.inc b/cmake/version.inc
index 7d0ad0585f67921c8c2ddbcf47ba08f803cc375a..49f01d00bc18ef4f0e1d87ac06b4b1bf09784268 100644
--- a/cmake/version.inc
+++ b/cmake/version.inc
@@ -4,7 +4,7 @@ PROJECT(TDengine)
 IF (DEFINED VERNUMBER)
   SET(TD_VER_NUMBER ${VERNUMBER})
 ELSE ()
-  SET(TD_VER_NUMBER "2.0.12.0")
+  SET(TD_VER_NUMBER "2.0.13.0")
 ENDIF ()
 IF (DEFINED VERCOMPATIBLE)
diff --git a/documentation20/webdocs/markdowndocs/Model-ch.md b/documentation20/webdocs/markdowndocs/Model-ch.md
index dce7819423661a3748c4c5cd4402777e21b16a89..ea1be899a85fe6bb31ab03674ab496d7b301432f 100644
--- a/documentation20/webdocs/markdowndocs/Model-ch.md
+++ b/documentation20/webdocs/markdowndocs/Model-ch.md
@@ -4,6 +4,8 @@
 TDengine采用关系型数据模型，需要建库、建表。因此对于一个具体的应用场景，需要考虑库的设计，超级表和普通表的设计。本节不讨论细致的语法规则，只介绍概念。
+关于数据建模请参考视频教程。
+
 ## 创建库
 不同类型的数据采集点往往具有不同的数据特征，包括数据采集频率的高低，数据保留时间的长短，副本的数目，数据块的大小，是否允许更新数据等等。为让各种场景下TDengine都能最大效率的工作，TDengine建议将不同数据特征的表创建在不同的库里，因为每个库可以配置不同的存储策略。创建一个库时，除SQL标准的选项外，应用还可以指定保留时长、副本数、内存块个数、时间精度、文件块里最大最小记录条数、是否压缩、一个数据文件覆盖的天数等多种参数。比如：
@@ -60,4 +62,3 @@ TDengine支持多列模型，只要物理量是一个数据采集点同时采集
 TDengine建议尽可能采用多列模型，因为插入效率以及存储效率更高。但对于有些场景，一个采集点的采集量的种类经常变化，这个时候，如果采用多列模型，就需要频繁修改超级表的结构定义，让应用变的复杂，这个时候，采用单列模型会显得简单。
-关于数据建模请参考视频教程。
diff --git a/documentation20/webdocs/markdowndocs/cluster-ch.md b/documentation20/webdocs/markdowndocs/cluster-ch.md
index f1c275ab0c4c986766fa8d33c71fe65777c90848..89f6a64f192c65ae422c6fc52600040d439456a7 100644
--- a/documentation20/webdocs/markdowndocs/cluster-ch.md
+++ b/documentation20/webdocs/markdowndocs/cluster-ch.md
@@ -6,6 +6,8 @@
 TDengine的集群管理极其简单，除添加和删除节点需要人工干预之外，其他全部是自动完成，最大程度的降低了运维的工作量。本章对集群管理的操作做详细的描述。
+关于集群搭建请参考视频教程。
+
 ## 准备工作
 **第零步**：规划集群所有物理节点的FQDN，将规划好的FQDN分别添加到每个物理节点的/etc/hostname；修改每个物理节点的/etc/hosts，将所有集群物理节点的IP与FQDN的对应添加好。【如部署了DNS，请联系网络管理员在DNS上做好相关配置】
@@ -227,4 +229,3 @@ SHOW MNODES;
 TDengine提供一个执行程序tarbitrator, 找任何一台Linux服务器运行它即可。请点击[安装包下载](https://www.taosdata.com/cn/all-downloads/)，在TDengine Arbitrator Linux一节中，选择适合的版本下载并安装。该程序对系统资源几乎没有要求，只需要保证有网络连接即可。该应用的命令行参数`-p`可以指定其对外服务的端口号，缺省是6042。配置每个taosd实例时，可以在配置文件taos.cfg里将参数arbitrator设置为arbitrator的End Point。如果该参数配置了，当副本数为偶数数，系统将自动连接配置的arbitrator。如果副本数为奇数，即使配置了arbitrator, 系统也不会去建立连接。
-关于集群搭建请参考视频教程。
diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml
index d1a334664e17c89dc3f55849f8ceb203e5026fe6..c4b2039737723ca7704cb8a40a3f8d1e128df008 100644
--- a/snap/snapcraft.yaml
+++ b/snap/snapcraft.yaml
@@ -1,6 +1,6 @@
 name: tdengine
 base: core18
-version: '2.0.12.0'
+version: '2.0.13.0'
 icon: snap/gui/t-dengine.svg
 summary: an open-source big data platform designed and optimized for IoT.
 description: |
@@ -72,7 +72,7 @@ parts:
       - usr/bin/taosd
       - usr/bin/taos
      - usr/bin/taosdemo
-      - usr/lib/libtaos.so.2.0.12.0
+      - usr/lib/libtaos.so.2.0.13.0
       - usr/lib/libtaos.so.1
       - usr/lib/libtaos.so
diff --git a/src/balance/src/bnMain.c b/src/balance/src/bnMain.c
index 7725aa5db4ca9de5b72d8a900a4d9e0b3c0200db..3e1d5eda763c99afb8a0c5dcd3f08996eec80337 100644
--- a/src/balance/src/bnMain.c
+++ b/src/balance/src/bnMain.c
@@ -237,21 +237,21 @@ static bool bnCheckVgroupReady(SVgObj *pVgroup, SVnodeGid *pRmVnode) {
   bool isReady = false;
   for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
     SVnodeGid *pVnode = pVgroup->vnodeGid + i;
+    SDnodeObj *pDnode = pVnode->pDnode;
     if (pVnode == pRmVnode) continue;
     int32_t vver = mnodeGetVgidVer(pVnode->vver);
-    mTrace("vgId:%d, check vgroup status, vindex:%d dnode:%d status:%s role:%s vver:%d, rmvver:%d" , pVgroup->vgId, i,
-           pVnode->dnodeId, dnodeStatus[pVnode->pDnode->status], syncRole[pVnode->role], vver, rmVnodeVer);
-    if (pVnode->pDnode->status == TAOS_DN_STATUS_DROPPING) continue;
-    if (pVnode->pDnode->status == TAOS_DN_STATUS_OFFLINE) continue;
+    mTrace("vgId:%d, check vgroup status, vindex:%d dnode:%d status:%s role:%s vver:%d, rmvver:%d", pVgroup->vgId, i,
+           pVnode->dnodeId, dnodeStatus[pDnode->status], syncRole[pVnode->role], vver, rmVnodeVer);
+    if (pDnode->status == TAOS_DN_STATUS_DROPPING) continue;
+    if (pDnode->status == TAOS_DN_STATUS_OFFLINE) continue;
     if (pVnode->role != TAOS_SYNC_ROLE_SLAVE && pVnode->role != TAOS_SYNC_ROLE_MASTER) continue;
     if (rmVnodeVer == 0 || vver >= rmVnodeVer) {
-      mInfo("vgId:%d, is ready for vindex:%d in dnode:%d status:%s role:%s vver:%d larger than rmvver:%d", pVgroup->vgId, i,
-            pVnode->dnodeId, dnodeStatus[pVnode->pDnode->status], syncRole[pVnode->role], vver, rmVnodeVer);
+      mInfo("vgId:%d, is ready for vindex:%d in dnode:%d status:%s role:%s vver:%d larger than rmvver:%d",
+            pVgroup->vgId, i, pVnode->dnodeId, dnodeStatus[pDnode->status], syncRole[pVnode->role], vver, rmVnodeVer);
+      isReady = true;
     }
-
-    isReady = true;
   }
   return isReady;
diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c
index 96aeb9d60de1ab6fbaeebcb54e2da1ab316179f8..b7b3441bd18606d3b6444e508587f6552efc08ff 100644
--- a/src/client/src/tscAsync.c
+++ b/src/client/src/tscAsync.c
@@ -421,7 +421,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
     SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
     // check if it is a sub-query of super table query first, if true, enter another routine
-    if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY|TSDB_QUERY_TYPE_TAG_FILTER_QUERY))) {
+    if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY|TSDB_QUERY_TYPE_SUBQUERY|TSDB_QUERY_TYPE_TAG_FILTER_QUERY))) {
       tscDebug("%p update local table meta, continue to process sql and send the corresponding query", pSql);
       STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c
index 7dd0de23269417372898de068c6d142349cccfb2..280c8b7ec2b8887d161fce04e5c22efecc0c4e46 100644
--- a/src/client/src/tscSQLParser.c
+++ b/src/client/src/tscSQLParser.c
@@ -75,11 +75,11 @@ static int32_t insertResultField(SQueryInfo* pQueryInfo, int32_t outputIndex, SC
 static int32_t convertFunctionId(int32_t optr, int16_t* functionId);
 static uint8_t convertOptr(SStrToken *pToken);
-static int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSelection, bool isSTable, bool joinQuery);
+static int32_t
parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSelection, bool isSTable, bool joinQuery, bool intervalQuery); static bool validateIpAddress(const char* ip, size_t size); static bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo); -static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery); +static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery, bool intervalQuery); static int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd); @@ -1475,7 +1475,7 @@ static void addPrimaryTsColIntoResult(SQueryInfo* pQueryInfo) { pQueryInfo->type |= TSDB_QUERY_TYPE_PROJECTION_QUERY; } -int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSelection, bool isSTable, bool joinQuery) { +int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSelection, bool isSTable, bool joinQuery, bool intervalQuery) { assert(pSelection != NULL && pCmd != NULL); const char* msg2 = "functions can not be mixed up"; @@ -1531,7 +1531,7 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSel addPrimaryTsColIntoResult(pQueryInfo); } - if (!functionCompatibleCheck(pQueryInfo, joinQuery)) { + if (!functionCompatibleCheck(pQueryInfo, joinQuery, intervalQuery)) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } @@ -2810,7 +2810,7 @@ bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) return false; } -static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery) { +static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery, bool intervalQuery) { int32_t startIdx = 0; size_t numOfExpr = tscSqlExprNumOfExprs(pQueryInfo); @@ -2826,6 +2826,10 @@ static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery) { int32_t factor = functionCompatList[tscSqlExprGet(pQueryInfo, startIdx)->functionId]; + if (tscSqlExprGet(pQueryInfo, 0)->functionId == TSDB_FUNC_LAST_ROW && (joinQuery || intervalQuery)) { + return false; + } + // diff function cannot be executed with other function // arithmetic function can be executed with other arithmetic functions size_t size = tscSqlExprNumOfExprs(pQueryInfo); @@ -2850,7 +2854,7 @@ static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery) { } } - if (functionId == TSDB_FUNC_LAST_ROW && joinQuery) { + if (functionId == TSDB_FUNC_LAST_ROW && (joinQuery || intervalQuery)) { return false; } } @@ -6303,7 +6307,7 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) { } bool isSTable = UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo); - if (parseSelectClause(&pSql->cmd, 0, pQuerySql->pSelection, isSTable, false) != TSDB_CODE_SUCCESS) { + if (parseSelectClause(&pSql->cmd, 0, pQuerySql->pSelection, isSTable, false, false) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_SQL; } @@ -6552,7 +6556,9 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) { int32_t joinQuery = (pQuerySql->from != NULL && taosArrayGetSize(pQuerySql->from) > 2); - if (parseSelectClause(pCmd, index, pQuerySql->pSelection, isSTable, joinQuery) != TSDB_CODE_SUCCESS) { + int32_t intervalQuery = !(pQuerySql->interval.type == 0 || pQuerySql->interval.n == 0); + + if (parseSelectClause(pCmd, index, pQuerySql->pSelection, isSTable, joinQuery, intervalQuery) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_SQL; } diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 
a6e33778cde0a43f4cdd003e718ef49b89a41866..5d818692ed90480c4b461c1fe8aba9d785dee694 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -458,12 +458,13 @@ void tscFreeRegisteredSqlObj(void *pSql) { assert(RID_VALID(p->self)); - tscFreeSqlObj(p); - taosReleaseRef(tscRefId, pTscObj->rid); - int32_t num = atomic_sub_fetch_32(&pTscObj->numOfObj, 1); int32_t total = atomic_sub_fetch_32(&tscNumOfObj, 1); + tscDebug("%p free SqlObj, total in tscObj:%d, total:%d", pSql, num, total); + tscFreeSqlObj(p); + taosReleaseRef(tscRefId, pTscObj->rid); + } void tscFreeSqlObj(SSqlObj* pSql) { diff --git a/src/connector/C#/TDengineDriver.cs b/src/connector/C#/TDengineDriver.cs index b6f143e1813d60c1ac4ae8356efdca4929c51345..205269501d376a4753b3aedbfa8d512b2df31600 100644 --- a/src/connector/C#/TDengineDriver.cs +++ b/src/connector/C#/TDengineDriver.cs @@ -19,136 +19,137 @@ using System.Runtime.InteropServices; namespace TDengineDriver { - enum TDengineDataType { - TSDB_DATA_TYPE_NULL = 0, // 1 bytes - TSDB_DATA_TYPE_BOOL = 1, // 1 bytes - TSDB_DATA_TYPE_TINYINT = 2, // 1 bytes - TSDB_DATA_TYPE_SMALLINT = 3, // 2 bytes - TSDB_DATA_TYPE_INT = 4, // 4 bytes - TSDB_DATA_TYPE_BIGINT = 5, // 8 bytes - TSDB_DATA_TYPE_FLOAT = 6, // 4 bytes - TSDB_DATA_TYPE_DOUBLE = 7, // 8 bytes - TSDB_DATA_TYPE_BINARY = 8, // string - TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes - TSDB_DATA_TYPE_NCHAR = 10 // unicode string - } - - enum TDengineInitOption - { - TSDB_OPTION_LOCALE = 0, - TSDB_OPTION_CHARSET = 1, - TSDB_OPTION_TIMEZONE = 2, - TDDB_OPTION_CONFIGDIR = 3, - TDDB_OPTION_SHELL_ACTIVITY_TIMER = 4 - } - - class TDengineMeta - { - public string name; - public short size; - public byte type; - public string TypeName() + enum TDengineDataType { - switch ((TDengineDataType)type) - { - case TDengineDataType.TSDB_DATA_TYPE_BOOL: - return "BOOLEAN"; - case TDengineDataType.TSDB_DATA_TYPE_TINYINT: - return "BYTE"; - case TDengineDataType.TSDB_DATA_TYPE_SMALLINT: - return "SHORT"; - case TDengineDataType.TSDB_DATA_TYPE_INT: - return "INT"; - case TDengineDataType.TSDB_DATA_TYPE_BIGINT: - return "LONG"; - case TDengineDataType.TSDB_DATA_TYPE_FLOAT: - return "FLOAT"; - case TDengineDataType.TSDB_DATA_TYPE_DOUBLE: - return "DOUBLE"; - case TDengineDataType.TSDB_DATA_TYPE_BINARY: - return "STRING"; - case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP: - return "TIMESTAMP"; - case TDengineDataType.TSDB_DATA_TYPE_NCHAR: - return "NCHAR"; - default: - return "undefine"; - } + TSDB_DATA_TYPE_NULL = 0, // 1 bytes + TSDB_DATA_TYPE_BOOL = 1, // 1 bytes + TSDB_DATA_TYPE_TINYINT = 2, // 1 bytes + TSDB_DATA_TYPE_SMALLINT = 3, // 2 bytes + TSDB_DATA_TYPE_INT = 4, // 4 bytes + TSDB_DATA_TYPE_BIGINT = 5, // 8 bytes + TSDB_DATA_TYPE_FLOAT = 6, // 4 bytes + TSDB_DATA_TYPE_DOUBLE = 7, // 8 bytes + TSDB_DATA_TYPE_BINARY = 8, // string + TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes + TSDB_DATA_TYPE_NCHAR = 10 // unicode string } - } - class TDengine - { - public const int TSDB_CODE_SUCCESS = 0; + enum TDengineInitOption + { + TSDB_OPTION_LOCALE = 0, + TSDB_OPTION_CHARSET = 1, + TSDB_OPTION_TIMEZONE = 2, + TDDB_OPTION_CONFIGDIR = 3, + TDDB_OPTION_SHELL_ACTIVITY_TIMER = 4 + } - [DllImport("taos.dll", EntryPoint = "taos_init", CallingConvention = CallingConvention.Cdecl)] - static extern public void Init(); + class TDengineMeta + { + public string name; + public short size; + public byte type; + public string TypeName() + { + switch ((TDengineDataType)type) + { + case TDengineDataType.TSDB_DATA_TYPE_BOOL: + return "BOOLEAN"; + case 
TDengineDataType.TSDB_DATA_TYPE_TINYINT: + return "BYTE"; + case TDengineDataType.TSDB_DATA_TYPE_SMALLINT: + return "SHORT"; + case TDengineDataType.TSDB_DATA_TYPE_INT: + return "INT"; + case TDengineDataType.TSDB_DATA_TYPE_BIGINT: + return "LONG"; + case TDengineDataType.TSDB_DATA_TYPE_FLOAT: + return "FLOAT"; + case TDengineDataType.TSDB_DATA_TYPE_DOUBLE: + return "DOUBLE"; + case TDengineDataType.TSDB_DATA_TYPE_BINARY: + return "STRING"; + case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP: + return "TIMESTAMP"; + case TDengineDataType.TSDB_DATA_TYPE_NCHAR: + return "NCHAR"; + default: + return "undefine"; + } + } + } - [DllImport("taos.dll", EntryPoint = "taos_cleanup", CallingConvention = CallingConvention.Cdecl)] - static extern public void Cleanup(); + class TDengine + { + public const int TSDB_CODE_SUCCESS = 0; - [DllImport("taos.dll", EntryPoint = "taos_options", CallingConvention = CallingConvention.Cdecl)] - static extern public void Options(int option, string value); + [DllImport("taos", EntryPoint = "taos_init", CallingConvention = CallingConvention.Cdecl)] + static extern public void Init(); - [DllImport("taos.dll", EntryPoint = "taos_connect", CallingConvention = CallingConvention.Cdecl)] - static extern public IntPtr Connect(string ip, string user, string password, string db, short port); + [DllImport("taos", EntryPoint = "taos_cleanup", CallingConvention = CallingConvention.Cdecl)] + static extern public void Cleanup(); - [DllImport("taos.dll", EntryPoint = "taos_errstr", CallingConvention = CallingConvention.Cdecl)] - static extern private IntPtr taos_errstr(IntPtr res); - static public string Error(IntPtr res) - { - IntPtr errPtr = taos_errstr(res); - return Marshal.PtrToStringAnsi(errPtr); - } + [DllImport("taos", EntryPoint = "taos_options", CallingConvention = CallingConvention.Cdecl)] + static extern public void Options(int option, string value); - [DllImport("taos.dll", EntryPoint = "taos_errno", CallingConvention = CallingConvention.Cdecl)] - static extern public int ErrorNo(IntPtr res); + [DllImport("taos", EntryPoint = "taos_connect", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr Connect(string ip, string user, string password, string db, short port); - [DllImport("taos.dll", EntryPoint = "taos_query", CallingConvention = CallingConvention.Cdecl)] - static extern public IntPtr Query(IntPtr conn, string sqlstr); + [DllImport("taos", EntryPoint = "taos_errstr", CallingConvention = CallingConvention.Cdecl)] + static extern private IntPtr taos_errstr(IntPtr res); + static public string Error(IntPtr res) + { + IntPtr errPtr = taos_errstr(res); + return Marshal.PtrToStringAnsi(errPtr); + } - [DllImport("taos.dll", EntryPoint = "taos_affected_rows", CallingConvention = CallingConvention.Cdecl)] - static extern public int AffectRows(IntPtr res); + [DllImport("taos", EntryPoint = "taos_errno", CallingConvention = CallingConvention.Cdecl)] + static extern public int ErrorNo(IntPtr res); - [DllImport("taos.dll", EntryPoint = "taos_field_count", CallingConvention = CallingConvention.Cdecl)] - static extern public int FieldCount(IntPtr res); + [DllImport("taos", EntryPoint = "taos_query", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr Query(IntPtr conn, string sqlstr); - [DllImport("taos.dll", EntryPoint = "taos_fetch_fields", CallingConvention = CallingConvention.Cdecl)] - static extern private IntPtr taos_fetch_fields(IntPtr res); - static public List FetchFields(IntPtr res) - { - const int fieldSize = 68; - - List 
metas = new List(); - if (res == IntPtr.Zero) - { - return metas; - } - - int fieldCount = FieldCount(res); - IntPtr fieldsPtr = taos_fetch_fields(res); - - for (int i = 0; i < fieldCount; ++i) - { - int offset = i * fieldSize; - - TDengineMeta meta = new TDengineMeta(); - meta.name = Marshal.PtrToStringAnsi(fieldsPtr + offset); - meta.type = Marshal.ReadByte(fieldsPtr + offset + 65); - meta.size = Marshal.ReadInt16(fieldsPtr + offset + 66); - metas.Add(meta); - } - - return metas; - } + [DllImport("taos", EntryPoint = "taos_affected_rows", CallingConvention = CallingConvention.Cdecl)] + static extern public int AffectRows(IntPtr res); - [DllImport("taos.dll", EntryPoint = "taos_fetch_row", CallingConvention = CallingConvention.Cdecl)] - static extern public IntPtr FetchRows(IntPtr res); + [DllImport("taos", EntryPoint = "taos_field_count", CallingConvention = CallingConvention.Cdecl)] + static extern public int FieldCount(IntPtr res); - [DllImport("taos.dll", EntryPoint = "taos_free_result", CallingConvention = CallingConvention.Cdecl)] - static extern public IntPtr FreeResult(IntPtr res); + [DllImport("taos", EntryPoint = "taos_fetch_fields", CallingConvention = CallingConvention.Cdecl)] + static extern private IntPtr taos_fetch_fields(IntPtr res); + static public List FetchFields(IntPtr res) + { + const int fieldSize = 68; - [DllImport("taos.dll", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)] - static extern public int Close(IntPtr taos); - } -} \ No newline at end of file + List metas = new List(); + if (res == IntPtr.Zero) + { + return metas; + } + + int fieldCount = FieldCount(res); + IntPtr fieldsPtr = taos_fetch_fields(res); + + for (int i = 0; i < fieldCount; ++i) + { + int offset = i * fieldSize; + + TDengineMeta meta = new TDengineMeta(); + meta.name = Marshal.PtrToStringAnsi(fieldsPtr + offset); + meta.type = Marshal.ReadByte(fieldsPtr + offset + 65); + meta.size = Marshal.ReadInt16(fieldsPtr + offset + 66); + metas.Add(meta); + } + + return metas; + } + + [DllImport("taos", EntryPoint = "taos_fetch_row", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr FetchRows(IntPtr res); + + [DllImport("taos", EntryPoint = "taos_free_result", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr FreeResult(IntPtr res); + + [DllImport("taos", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)] + static extern public int Close(IntPtr taos); + } +} diff --git a/src/connector/jdbc/pom.xml b/src/connector/jdbc/pom.xml index 25a36e3a4822bfbea0c7f07ff3a4a09032d44542..9865fc7127b39debc965f6be9bad6e8485169416 100755 --- a/src/connector/jdbc/pom.xml +++ b/src/connector/jdbc/pom.xml @@ -56,6 +56,12 @@ test + + mysql + mysql-connector-java + 5.1.47 + + org.apache.httpcomponents @@ -73,7 +79,14 @@ 1.2.58 + + mysql + mysql-connector-java + 5.1.49 + + + diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDatabaseMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDatabaseMetaData.java new file mode 100644 index 0000000000000000000000000000000000000000..1445be18654ff3e73b74484b47e09856ddc94b01 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDatabaseMetaData.java @@ -0,0 +1,808 @@ +/*************************************************************************** + * Copyright (c) 2019 TAOS Data, Inc. 
+ * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + *****************************************************************************/ +package com.taosdata.jdbc; + +import java.sql.*; +import java.util.ArrayList; +import java.util.List; + +public abstract class AbstractDatabaseMetaData implements DatabaseMetaData { + + private final static String PRODUCT_NAME = "TDengine"; + private final static String PRODUCT_VESION = "2.0.x.x"; + private final static String DRIVER_NAME = "taos-jdbcdriver"; + private final static String DRIVER_VERSION = "2.0.x"; + private final static int DRIVER_MAJAR_VERSION = 2; + private final static int DRIVER_MINOR_VERSION = 0; + + public boolean allProceduresAreCallable() throws SQLException { + return false; + } + + public boolean allTablesAreSelectable() throws SQLException { + return false; + } + + public abstract String getURL() throws SQLException; + + public abstract String getUserName() throws SQLException; + + public boolean isReadOnly() throws SQLException { + return false; + } + + public boolean nullsAreSortedHigh() throws SQLException { + return false; + } + + public boolean nullsAreSortedLow() throws SQLException { + return !nullsAreSortedHigh(); + } + + public boolean nullsAreSortedAtStart() throws SQLException { + return true; + } + + public boolean nullsAreSortedAtEnd() throws SQLException { + return !nullsAreSortedAtStart(); + } + + public String getDatabaseProductName() throws SQLException { + return PRODUCT_NAME; + } + + public String getDatabaseProductVersion() throws SQLException { + return PRODUCT_VESION; + } + + public String getDriverName() throws SQLException { + return DRIVER_NAME; + } + + public String getDriverVersion() throws SQLException { + return DRIVER_VERSION; + } + + public int getDriverMajorVersion() { + return DRIVER_MAJAR_VERSION; + } + + public int getDriverMinorVersion() { + return DRIVER_MINOR_VERSION; + } + + public boolean usesLocalFiles() throws SQLException { + return false; + } + + public boolean usesLocalFilePerTable() throws SQLException { + return false; + } + + public boolean supportsMixedCaseIdentifiers() throws SQLException { + return false; + } + + public boolean storesUpperCaseIdentifiers() throws SQLException { + return false; + } + + public boolean storesLowerCaseIdentifiers() throws SQLException { + return false; + } + + public boolean storesMixedCaseIdentifiers() throws SQLException { + return false; + } + + public boolean supportsMixedCaseQuotedIdentifiers() throws SQLException { + return false; + } + + public boolean storesUpperCaseQuotedIdentifiers() throws SQLException { + return false; + } + + public boolean storesLowerCaseQuotedIdentifiers() throws SQLException { + return false; + } + + public boolean storesMixedCaseQuotedIdentifiers() throws SQLException { + return false; + } + + public String getIdentifierQuoteString() throws SQLException { + return " "; + } + + public String getSQLKeywords() throws SQLException { + return null; + } + + public String getNumericFunctions() throws SQLException { + return null; + } + + 
public String getStringFunctions() throws SQLException { + return null; + } + + public String getSystemFunctions() throws SQLException { + return null; + } + + public String getTimeDateFunctions() throws SQLException { + return null; + } + + public String getSearchStringEscape() throws SQLException { + return null; + } + + public String getExtraNameCharacters() throws SQLException { + return null; + } + + public boolean supportsAlterTableWithAddColumn() throws SQLException { + return true; + } + + public boolean supportsAlterTableWithDropColumn() throws SQLException { + return true; + } + + public boolean supportsColumnAliasing() throws SQLException { + return true; + } + + public boolean nullPlusNonNullIsNull() throws SQLException { + return false; + } + + public boolean supportsConvert() throws SQLException { + return false; + } + + public boolean supportsConvert(int fromType, int toType) throws SQLException { + return false; + } + + public boolean supportsTableCorrelationNames() throws SQLException { + return false; + } + + public boolean supportsDifferentTableCorrelationNames() throws SQLException { + return false; + } + + public boolean supportsExpressionsInOrderBy() throws SQLException { + return false; + } + + public boolean supportsOrderByUnrelated() throws SQLException { + return false; + } + + public boolean supportsGroupBy() throws SQLException { + return false; + } + + public boolean supportsGroupByUnrelated() throws SQLException { + return false; + } + + public boolean supportsGroupByBeyondSelect() throws SQLException { + return false; + } + + public boolean supportsLikeEscapeClause() throws SQLException { + return false; + } + + public boolean supportsMultipleResultSets() throws SQLException { + return false; + } + + public boolean supportsMultipleTransactions() throws SQLException { + return false; + } + + public boolean supportsNonNullableColumns() throws SQLException { + return false; + } + + public boolean supportsMinimumSQLGrammar() throws SQLException { + return false; + } + + public boolean supportsCoreSQLGrammar() throws SQLException { + return false; + } + + public boolean supportsExtendedSQLGrammar() throws SQLException { + return false; + } + + public boolean supportsANSI92EntryLevelSQL() throws SQLException { + return false; + } + + public boolean supportsANSI92IntermediateSQL() throws SQLException { + return false; + } + + public boolean supportsANSI92FullSQL() throws SQLException { + return false; + } + + public boolean supportsIntegrityEnhancementFacility() throws SQLException { + return false; + } + + public boolean supportsOuterJoins() throws SQLException { + return false; + } + + public boolean supportsFullOuterJoins() throws SQLException { + return false; + } + + public boolean supportsLimitedOuterJoins() throws SQLException { + return false; + } + + public String getSchemaTerm() throws SQLException { + return null; + } + + public String getProcedureTerm() throws SQLException { + return null; + } + + public String getCatalogTerm() throws SQLException { + return "database"; + } + + public boolean isCatalogAtStart() throws SQLException { + return true; + } + + public String getCatalogSeparator() throws SQLException { + return "."; + } + + public boolean supportsSchemasInDataManipulation() throws SQLException { + return false; + } + + public boolean supportsSchemasInProcedureCalls() throws SQLException { + return false; + } + + public boolean supportsSchemasInTableDefinitions() throws SQLException { + return false; + } + + public boolean 
supportsSchemasInIndexDefinitions() throws SQLException { + return false; + } + + public boolean supportsSchemasInPrivilegeDefinitions() throws SQLException { + return false; + } + + public boolean supportsCatalogsInDataManipulation() throws SQLException { + return true; + } + + public boolean supportsCatalogsInProcedureCalls() throws SQLException { + return false; + } + + public boolean supportsCatalogsInTableDefinitions() throws SQLException { + return false; + } + + public boolean supportsCatalogsInIndexDefinitions() throws SQLException { + return false; + } + + public boolean supportsCatalogsInPrivilegeDefinitions() throws SQLException { + return false; + } + + public boolean supportsPositionedDelete() throws SQLException { + return false; + } + + public boolean supportsPositionedUpdate() throws SQLException { + return false; + } + + public boolean supportsSelectForUpdate() throws SQLException { + return false; + } + + public boolean supportsStoredProcedures() throws SQLException { + return false; + } + + public boolean supportsSubqueriesInComparisons() throws SQLException { + return false; + } + + public boolean supportsSubqueriesInExists() throws SQLException { + return false; + } + + public boolean supportsSubqueriesInIns() throws SQLException { + return false; + } + + public boolean supportsSubqueriesInQuantifieds() throws SQLException { + return false; + } + + public boolean supportsCorrelatedSubqueries() throws SQLException { + return false; + } + + public boolean supportsUnion() throws SQLException { + return false; + } + + public boolean supportsUnionAll() throws SQLException { + return false; + } + + public boolean supportsOpenCursorsAcrossCommit() throws SQLException { + return false; + } + + public boolean supportsOpenCursorsAcrossRollback() throws SQLException { + return false; + } + + public boolean supportsOpenStatementsAcrossCommit() throws SQLException { + return false; + } + + public boolean supportsOpenStatementsAcrossRollback() throws SQLException { + return false; + } + + public int getMaxBinaryLiteralLength() throws SQLException { + return 0; + } + + public int getMaxCharLiteralLength() throws SQLException { + return 0; + } + + public int getMaxColumnNameLength() throws SQLException { + return 0; + } + + public int getMaxColumnsInGroupBy() throws SQLException { + return 0; + } + + public int getMaxColumnsInIndex() throws SQLException { + return 0; + } + + public int getMaxColumnsInOrderBy() throws SQLException { + return 0; + } + + public int getMaxColumnsInSelect() throws SQLException { + return 0; + } + + public int getMaxColumnsInTable() throws SQLException { + return 0; + } + + public int getMaxConnections() throws SQLException { + return 0; + } + + public int getMaxCursorNameLength() throws SQLException { + return 0; + } + + public int getMaxIndexLength() throws SQLException { + return 0; + } + + public int getMaxSchemaNameLength() throws SQLException { + return 0; + } + + public int getMaxProcedureNameLength() throws SQLException { + return 0; + } + + public int getMaxCatalogNameLength() throws SQLException { + return 0; + } + + public int getMaxRowSize() throws SQLException { + return 0; + } + + public boolean doesMaxRowSizeIncludeBlobs() throws SQLException { + return false; + } + + public int getMaxStatementLength() throws SQLException { + return 0; + } + + public int getMaxStatements() throws SQLException { + return 0; + } + + public int getMaxTableNameLength() throws SQLException { + return 0; + } + + public int getMaxTablesInSelect() throws SQLException 
{ + return 0; + } + + public int getMaxUserNameLength() throws SQLException { + return 0; + } + + public int getDefaultTransactionIsolation() throws SQLException { + return 0; + } + + public boolean supportsTransactions() throws SQLException { + return false; + } + + public boolean supportsTransactionIsolationLevel(int level) throws SQLException { + return false; + } + + public boolean supportsDataDefinitionAndDataManipulationTransactions() throws SQLException { + return false; + } + + public boolean supportsDataManipulationTransactionsOnly() throws SQLException { + return false; + } + + public boolean dataDefinitionCausesTransactionCommit() throws SQLException { + return false; + } + + public boolean dataDefinitionIgnoredInTransactions() throws SQLException { + return false; + } + + public ResultSet getProcedures(String catalog, String schemaPattern, String procedureNamePattern) + throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public ResultSet getProcedureColumns(String catalog, String schemaPattern, String procedureNamePattern, + String columnNamePattern) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public abstract ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types) + throws SQLException; + + public ResultSet getSchemas() throws SQLException { + return getEmptyResultSet(); + } + + public abstract ResultSet getCatalogs() throws SQLException; + + public ResultSet getTableTypes() throws SQLException { + DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet(); + + // set up ColumnMetaDataList + List columnMetaDataList = new ArrayList(1); + ColumnMetaData colMetaData = new ColumnMetaData(); + colMetaData.setColIndex(0); + colMetaData.setColName("TABLE_TYPE"); + colMetaData.setColSize(10); + colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_BINARY); + columnMetaDataList.add(colMetaData); + + // set up rowDataList + List rowDataList = new ArrayList(2); + TSDBResultSetRowData rowData = new TSDBResultSetRowData(); + rowData.setString(0, "TABLE"); + rowDataList.add(rowData); + rowData = new TSDBResultSetRowData(); + rowData.setString(0, "STABLE"); + rowDataList.add(rowData); + + resultSet.setColumnMetaDataList(columnMetaDataList); + resultSet.setRowDataList(rowDataList); + return resultSet; + } + + public abstract ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) throws SQLException; + + protected int getNullable(int index, String typeName) { + if (index == 0 && "TIMESTAMP".equals(typeName)) + return DatabaseMetaData.columnNoNulls; + return DatabaseMetaData.columnNullable; + } + + protected int getColumnSize(String typeName, int length) { + switch (typeName) { + case "TIMESTAMP": + return 23; + + default: + return 0; + } + } + + protected int getDecimalDigits(String typeName) { + switch (typeName) { + case "FLOAT": + return 5; + case "DOUBLE": + return 9; + default: + return 0; + } + } + + protected int getDataType(String typeName) { + switch (typeName) { + case "TIMESTAMP": + return Types.TIMESTAMP; + case "INT": + return Types.INTEGER; + case "BIGINT": + return Types.BIGINT; + case "FLOAT": + return Types.FLOAT; + case "DOUBLE": + return Types.DOUBLE; + case "BINARY": + return Types.BINARY; + case "SMALLINT": + return Types.SMALLINT; + case "TINYINT": + return Types.TINYINT; + case "BOOL": + return Types.BOOLEAN; + case "NCHAR": + return Types.NCHAR; + default: + 
return Types.NULL; + } + } + + public ResultSet getColumnPrivileges(String catalog, String schema, String table, String columnNamePattern) + throws SQLException { + return getEmptyResultSet(); + } + + public ResultSet getTablePrivileges(String catalog, String schemaPattern, String tableNamePattern) + throws SQLException { + return getEmptyResultSet(); + } + + public ResultSet getBestRowIdentifier(String catalog, String schema, String table, int scope, boolean nullable) + throws SQLException { + return getEmptyResultSet(); + } + + public ResultSet getVersionColumns(String catalog, String schema, String table) throws SQLException { + return getEmptyResultSet(); + } + + public ResultSet getPrimaryKeys(String catalog, String schema, String table) throws SQLException { + return getEmptyResultSet(); + } + + public ResultSet getImportedKeys(String catalog, String schema, String table) throws SQLException { + return getEmptyResultSet(); + } + + public ResultSet getExportedKeys(String catalog, String schema, String table) throws SQLException { + return getEmptyResultSet(); + } + + public ResultSet getCrossReference(String parentCatalog, String parentSchema, String parentTable, + String foreignCatalog, String foreignSchema, String foreignTable) throws SQLException { + return getEmptyResultSet(); + } + + public ResultSet getTypeInfo() throws SQLException { + return getEmptyResultSet(); + } + + public ResultSet getIndexInfo(String catalog, String schema, String table, boolean unique, boolean approximate) + throws SQLException { + return getEmptyResultSet(); + } + + public boolean supportsResultSetType(int type) throws SQLException { + return false; + } + + public boolean supportsResultSetConcurrency(int type, int concurrency) throws SQLException { + return false; + } + + public boolean ownUpdatesAreVisible(int type) throws SQLException { + return false; + } + + public boolean ownDeletesAreVisible(int type) throws SQLException { + return false; + } + + public boolean ownInsertsAreVisible(int type) throws SQLException { + return false; + } + + public boolean othersUpdatesAreVisible(int type) throws SQLException { + return false; + } + + public boolean othersDeletesAreVisible(int type) throws SQLException { + return false; + } + + public boolean othersInsertsAreVisible(int type) throws SQLException { + return false; + } + + public boolean updatesAreDetected(int type) throws SQLException { + return false; + } + + public boolean deletesAreDetected(int type) throws SQLException { + return false; + } + + public boolean insertsAreDetected(int type) throws SQLException { + return false; + } + + public boolean supportsBatchUpdates() throws SQLException { + return false; + } + + public ResultSet getUDTs(String catalog, String schemaPattern, String typeNamePattern, int[] types) + throws SQLException { + return getEmptyResultSet(); + } + + public Connection getConnection() throws SQLException { + return null; + } + + public boolean supportsSavepoints() throws SQLException { + return false; + } + + public boolean supportsNamedParameters() throws SQLException { + return false; + } + + public boolean supportsMultipleOpenResults() throws SQLException { + return false; + } + + public boolean supportsGetGeneratedKeys() throws SQLException { + return false; + } + + public ResultSet getSuperTypes(String catalog, String schemaPattern, String typeNamePattern) throws SQLException { + return getEmptyResultSet(); + } + + public ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern) throws 
SQLException { + return getEmptyResultSet(); + } + + public ResultSet getAttributes(String catalog, String schemaPattern, String typeNamePattern, + String attributeNamePattern) throws SQLException { + return getEmptyResultSet(); + } + + public boolean supportsResultSetHoldability(int holdability) throws SQLException { + return false; + } + + public int getResultSetHoldability() throws SQLException { + return 0; + } + + public int getDatabaseMajorVersion() throws SQLException { + return 0; + } + + public int getDatabaseMinorVersion() throws SQLException { + return 0; + } + + public int getJDBCMajorVersion() throws SQLException { + return 0; + } + + public int getJDBCMinorVersion() throws SQLException { + return 0; + } + + public int getSQLStateType() throws SQLException { + return 0; + } + + public boolean locatorsUpdateCopy() throws SQLException { + return false; + } + + public boolean supportsStatementPooling() throws SQLException { + return false; + } + + public RowIdLifetime getRowIdLifetime() throws SQLException { + return null; + } + + public ResultSet getSchemas(String catalog, String schemaPattern) throws SQLException { + return null; + } + + public boolean supportsStoredFunctionsUsingCallSyntax() throws SQLException { + return false; + } + + public boolean autoCommitFailureClosesAllResultSets() throws SQLException { + return false; + } + + public ResultSet getClientInfoProperties() throws SQLException { + return getEmptyResultSet(); + } + + public ResultSet getFunctions(String catalog, String schemaPattern, String functionNamePattern) + throws SQLException { + return getEmptyResultSet(); + } + + public ResultSet getFunctionColumns(String catalog, String schemaPattern, String functionNamePattern, + String columnNamePattern) throws SQLException { + return getEmptyResultSet(); + } + + public ResultSet getPseudoColumns(String catalog, String schemaPattern, String tableNamePattern, + String columnNamePattern) throws SQLException { + return getEmptyResultSet(); + } + + public boolean generatedKeyAlwaysReturned() throws SQLException { + return false; + } + + private ResultSet getEmptyResultSet() { + return new EmptyResultSet(); + } +} \ No newline at end of file diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java index 3940e809300b7d8e0b6c79038afaa6ff76f81bf0..4f4911aad9c138eb13fffdd698b794a03222160f 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java @@ -19,68 +19,71 @@ import java.util.Map; public abstract class TSDBConstants { - public static final String DEFAULT_PORT = "6200"; - public static final String UNSUPPORT_METHOD_EXCEPTIONZ_MSG = "this operation is NOT supported currently!"; - public static final String INVALID_VARIABLES = "invalid variables"; - public static Map DATATYPE_MAP = null; + public static final String DEFAULT_PORT = "6200"; + public static final String UNSUPPORT_METHOD_EXCEPTIONZ_MSG = "this operation is NOT supported currently!"; + public static final String INVALID_VARIABLES = "invalid variables"; + public static Map DATATYPE_MAP = null; - public static final long JNI_NULL_POINTER = 0L; + public static final long JNI_NULL_POINTER = 0L; - public static final int JNI_SUCCESS = 0; - public static final int JNI_TDENGINE_ERROR = -1; - public static final int JNI_CONNECTION_NULL = -2; - public static final int JNI_RESULT_SET_NULL = -3; - public static final int 
JNI_NUM_OF_FIELDS_0 = -4; - public static final int JNI_SQL_NULL = -5; - public static final int JNI_FETCH_END = -6; - - public static final int TSDB_DATA_TYPE_NULL = 0; - public static final int TSDB_DATA_TYPE_BOOL = 1; - public static final int TSDB_DATA_TYPE_TINYINT = 2; - public static final int TSDB_DATA_TYPE_SMALLINT = 3; - public static final int TSDB_DATA_TYPE_INT = 4; - public static final int TSDB_DATA_TYPE_BIGINT = 5; - public static final int TSDB_DATA_TYPE_FLOAT = 6; - public static final int TSDB_DATA_TYPE_DOUBLE = 7; - public static final int TSDB_DATA_TYPE_BINARY = 8; - public static final int TSDB_DATA_TYPE_TIMESTAMP = 9; - public static final int TSDB_DATA_TYPE_NCHAR = 10; - - public static String WrapErrMsg(String msg) { - return "TDengine Error: " + msg; - } + public static final int JNI_SUCCESS = 0; + public static final int JNI_TDENGINE_ERROR = -1; + public static final int JNI_CONNECTION_NULL = -2; + public static final int JNI_RESULT_SET_NULL = -3; + public static final int JNI_NUM_OF_FIELDS_0 = -4; + public static final int JNI_SQL_NULL = -5; + public static final int JNI_FETCH_END = -6; - public static String FixErrMsg(int code) { - switch (code) { - case JNI_TDENGINE_ERROR: - return WrapErrMsg("internal error of database!"); - case JNI_CONNECTION_NULL: - return WrapErrMsg("invalid tdengine connection!"); - case JNI_RESULT_SET_NULL: - return WrapErrMsg("invalid resultset pointer!"); - case JNI_NUM_OF_FIELDS_0: - return WrapErrMsg("invalid num of fields!"); - case JNI_SQL_NULL: - return WrapErrMsg("can't execute empty sql!"); - case JNI_FETCH_END: - return WrapErrMsg("fetch to the end of resultset"); - default: - break; - } - return WrapErrMsg("unkown error!"); - } + public static final int TSDB_DATA_TYPE_NULL = 0; + public static final int TSDB_DATA_TYPE_BOOL = 1; + public static final int TSDB_DATA_TYPE_TINYINT = 2; + public static final int TSDB_DATA_TYPE_SMALLINT = 3; + public static final int TSDB_DATA_TYPE_INT = 4; + public static final int TSDB_DATA_TYPE_BIGINT = 5; + public static final int TSDB_DATA_TYPE_FLOAT = 6; + public static final int TSDB_DATA_TYPE_DOUBLE = 7; + public static final int TSDB_DATA_TYPE_BINARY = 8; + public static final int TSDB_DATA_TYPE_TIMESTAMP = 9; + public static final int TSDB_DATA_TYPE_NCHAR = 10; - static { - DATATYPE_MAP = new HashMap(); - DATATYPE_MAP.put(1, "BOOL"); - DATATYPE_MAP.put(2, "TINYINT"); - DATATYPE_MAP.put(3, "SMALLINT"); - DATATYPE_MAP.put(4, "INT"); - DATATYPE_MAP.put(5, "BIGINT"); - DATATYPE_MAP.put(6, "FLOAT"); - DATATYPE_MAP.put(7, "DOUBLE"); - DATATYPE_MAP.put(8, "BINARY"); - DATATYPE_MAP.put(9, "TIMESTAMP"); - DATATYPE_MAP.put(10, "NCHAR"); - } + // nchar field's max length + public static final int maxFieldSize = 16 * 1024; + + public static String WrapErrMsg(String msg) { + return "TDengine Error: " + msg; + } + + public static String FixErrMsg(int code) { + switch (code) { + case JNI_TDENGINE_ERROR: + return WrapErrMsg("internal error of database!"); + case JNI_CONNECTION_NULL: + return WrapErrMsg("invalid tdengine connection!"); + case JNI_RESULT_SET_NULL: + return WrapErrMsg("invalid resultset pointer!"); + case JNI_NUM_OF_FIELDS_0: + return WrapErrMsg("invalid num of fields!"); + case JNI_SQL_NULL: + return WrapErrMsg("can't execute empty sql!"); + case JNI_FETCH_END: + return WrapErrMsg("fetch to the end of resultset"); + default: + break; + } + return WrapErrMsg("unkown error!"); + } + + static { + DATATYPE_MAP = new HashMap(); + DATATYPE_MAP.put(1, "BOOL"); + DATATYPE_MAP.put(2, "TINYINT"); + 
DATATYPE_MAP.put(3, "SMALLINT"); + DATATYPE_MAP.put(4, "INT"); + DATATYPE_MAP.put(5, "BIGINT"); + DATATYPE_MAP.put(6, "FLOAT"); + DATATYPE_MAP.put(7, "DOUBLE"); + DATATYPE_MAP.put(8, "BINARY"); + DATATYPE_MAP.put(9, "TIMESTAMP"); + DATATYPE_MAP.put(10, "NCHAR"); + } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java index 6b0937a9b7c50c25d03459d18e01b807b5c00c3c..5260b780bd510edad6ef8ea9a481fa334cca50f6 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java @@ -1,19 +1,29 @@ package com.taosdata.jdbc.rs; import com.taosdata.jdbc.TSDBConstants; +import com.taosdata.jdbc.TSDBDriver; import java.sql.*; +import java.util.Enumeration; +import java.util.HashMap; import java.util.Map; import java.util.Properties; import java.util.concurrent.Executor; public class RestfulConnection implements Connection { + private static final String CONNECTION_IS_CLOSED = "connection is closed."; + private static final String AUTO_COMMIT_IS_TRUE = "auto commit is true"; private final String host; private final int port; private final Properties props; - private final String database; + private volatile String database; private final String url; + /******************************************************/ + private boolean isClosed; + private DatabaseMetaData metadata; + private Map> typeMap; + private Properties clientInfoProps = new Properties(); public RestfulConnection(String host, String port, Properties props, String database, String url) { this.host = host; @@ -21,280 +31,424 @@ public class RestfulConnection implements Connection { this.props = props; this.database = database; this.url = url; + this.metadata = new RestfulDatabaseMetaData(url, props.getProperty(TSDBDriver.PROPERTY_KEY_USER), this); } @Override public Statement createStatement() throws SQLException { if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg("restful TDengine connection is closed.")); + throw new SQLException(CONNECTION_IS_CLOSED); + return new RestfulStatement(this, database); } @Override public PreparedStatement prepareStatement(String sql) throws SQLException { - return null; + if (isClosed()) + throw new SQLException(CONNECTION_IS_CLOSED); + //TODO: prepareStatement + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public CallableStatement prepareCall(String sql) throws SQLException { - return null; + if (isClosed()) + throw new SQLException(CONNECTION_IS_CLOSED); + + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public String nativeSQL(String sql) throws SQLException { - return null; + if (isClosed()) + throw new SQLException(CONNECTION_IS_CLOSED); + + //nothing did + return sql; } @Override public void setAutoCommit(boolean autoCommit) throws SQLException { - + if (isClosed()) + throw new SQLException(CONNECTION_IS_CLOSED); + if (!autoCommit) + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public boolean getAutoCommit() throws SQLException { - return false; + if (isClosed()) + throw new SQLException(CONNECTION_IS_CLOSED); + return true; } @Override public void commit() throws SQLException { - + if (isClosed()) + throw new SQLException(CONNECTION_IS_CLOSED); + if (getAutoCommit()) + throw new 
SQLException(AUTO_COMMIT_IS_TRUE); + //nothing to do } @Override public void rollback() throws SQLException { - + if (isClosed()) + throw new SQLException(CONNECTION_IS_CLOSED); + if (getAutoCommit()) + throw new SQLException(AUTO_COMMIT_IS_TRUE); + //nothing to do } @Override public void close() throws SQLException { - + if (isClosed) + return; + //TODO: release all resources + isClosed = true; } @Override public boolean isClosed() throws SQLException { - return false; + return isClosed; } @Override public DatabaseMetaData getMetaData() throws SQLException { - //TODO: RestfulDatabaseMetaData is not implemented - return new RestfulDatabaseMetaData(); + if (isClosed()) + throw new SQLException(CONNECTION_IS_CLOSED); + + return this.metadata; } @Override public void setReadOnly(boolean readOnly) throws SQLException { - + if (isClosed()) + throw new SQLException(CONNECTION_IS_CLOSED); + // nothing to do } @Override public boolean isReadOnly() throws SQLException { - return false; + if (isClosed()) + throw new SQLException(CONNECTION_IS_CLOSED); + return true; } @Override public void setCatalog(String catalog) throws SQLException { - + if (isClosed()) + throw new SQLException(CONNECTION_IS_CLOSED); + synchronized (RestfulConnection.class) { + this.database = catalog; + } } @Override public String getCatalog() throws SQLException { - return null; + if (isClosed()) + throw new SQLException(CONNECTION_IS_CLOSED); + return this.database; } @Override public void setTransactionIsolation(int level) throws SQLException { - //transaction is not supported - throw new SQLFeatureNotSupportedException("transactions are not supported"); + if (isClosed()) + throw new SQLException(CONNECTION_IS_CLOSED); + switch (level) { + case Connection.TRANSACTION_NONE: + break; + case Connection.TRANSACTION_READ_UNCOMMITTED: + case Connection.TRANSACTION_READ_COMMITTED: + case Connection.TRANSACTION_REPEATABLE_READ: + case Connection.TRANSACTION_SERIALIZABLE: + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + default: + throw new SQLException(TSDBConstants.INVALID_VARIABLES); + } } - /** - * - */ @Override public int getTransactionIsolation() throws SQLException { + if (isClosed()) + throw new SQLException(CONNECTION_IS_CLOSED); //Connection.TRANSACTION_NONE specifies that transactions are not supported. 
return Connection.TRANSACTION_NONE; } @Override public SQLWarning getWarnings() throws SQLException { - //TODO: getWarnings not implemented + if (isClosed()) + throw new SQLException(CONNECTION_IS_CLOSED); + return null; } @Override public void clearWarnings() throws SQLException { - throw new SQLFeatureNotSupportedException("clearWarnings not supported."); + if (isClosed()) + throw new SQLException(CONNECTION_IS_CLOSED); + //nothing to do } @Override public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException { - return null; + if (isClosed()) + throw new SQLException(CONNECTION_IS_CLOSED); + + if (resultSetType != ResultSet.TYPE_FORWARD_ONLY) { + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + if (resultSetConcurrency != ResultSet.CONCUR_READ_ONLY) + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + return createStatement(); } @Override public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency) throws SQLException { - return null; + if (isClosed()) + throw new SQLException(CONNECTION_IS_CLOSED); + if (resultSetType != ResultSet.TYPE_FORWARD_ONLY || resultSetConcurrency != ResultSet.CONCUR_READ_ONLY) + throw new SQLFeatureNotSupportedException(TSDBConstants.INVALID_VARIABLES); + + return this.prepareStatement(sql); } @Override public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException { - return null; + if (isClosed()) + throw new SQLException(CONNECTION_IS_CLOSED); + if (resultSetType != ResultSet.TYPE_FORWARD_ONLY || resultSetConcurrency != ResultSet.CONCUR_READ_ONLY) + throw new SQLFeatureNotSupportedException(TSDBConstants.INVALID_VARIABLES); + + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public Map> getTypeMap() throws SQLException { - return null; + if (isClosed()) + throw new SQLException(CONNECTION_IS_CLOSED); + + synchronized (RestfulConnection.class) { + if (this.typeMap == null) { + this.typeMap = new HashMap<>(); + } + return this.typeMap; + } } @Override public void setTypeMap(Map> map) throws SQLException { + if (isClosed()) + throw new SQLException(CONNECTION_IS_CLOSED); + synchronized (RestfulConnection.class) { + this.typeMap = map; + } } @Override public void setHoldability(int holdability) throws SQLException { - + if (isClosed()) + throw new SQLException(CONNECTION_IS_CLOSED); + if (holdability != ResultSet.HOLD_CURSORS_OVER_COMMIT) + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public int getHoldability() throws SQLException { - return 0; + if (isClosed()) + throw new SQLException(CONNECTION_IS_CLOSED); + return ResultSet.HOLD_CURSORS_OVER_COMMIT; } @Override public Savepoint setSavepoint() throws SQLException { - return null; + if (isClosed()) + throw new SQLException(CONNECTION_IS_CLOSED); + if (getAutoCommit()) + throw new SQLException(TSDBConstants.INVALID_VARIABLES); + //nothing to do + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public Savepoint setSavepoint(String name) throws SQLException { - return null; + if (isClosed()) + throw new SQLException(CONNECTION_IS_CLOSED); + if (getAutoCommit()) + throw new SQLException(TSDBConstants.INVALID_VARIABLES); + //nothing to do + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override 
public void rollback(Savepoint savepoint) throws SQLException { - + if (isClosed()) + throw new SQLException(CONNECTION_IS_CLOSED); + if (getAutoCommit()) + throw new SQLException(TSDBConstants.INVALID_VARIABLES); + //nothing to do + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void releaseSavepoint(Savepoint savepoint) throws SQLException { - + if (isClosed()) + throw new SQLException(CONNECTION_IS_CLOSED); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public Statement createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { - return null; + if (resultSetHoldability != ResultSet.HOLD_CURSORS_OVER_COMMIT) + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + return createStatement(resultSetType, resultSetConcurrency); } @Override public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { - return null; + if (resultSetHoldability != ResultSet.HOLD_CURSORS_OVER_COMMIT) + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + return prepareStatement(sql, resultSetType, resultSetConcurrency); } @Override public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { - return null; + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException { - return null; + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException { - return null; + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException { - return null; + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public Clob createClob() throws SQLException { - //TODO: not supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public Blob createBlob() throws SQLException { - //TODO: not supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public NClob createNClob() throws SQLException { - //TODO: not supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public SQLXML createSQLXML() throws SQLException { - //TODO: not supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public boolean isValid(int timeout) throws SQLException { - return false; + if (timeout < 0) + throw new SQLException(TSDBConstants.INVALID_VARIABLES); + // TODO: + /* The driver shall submit a query on the connection or use some other mechanism that positively verifies + the connection is still valid when this method is called.*/ + return !isClosed(); } @Override public void 
setClientInfo(String name, String value) throws SQLClientInfoException { - + if (isClosed) + throw new SQLClientInfoException(); + clientInfoProps.setProperty(name, value); } @Override public void setClientInfo(Properties properties) throws SQLClientInfoException { + if (isClosed) + throw new SQLClientInfoException(); + for (Enumeration enumer = properties.keys(); enumer.hasMoreElements(); ) { + String name = (String) enumer.nextElement(); + clientInfoProps.put(name, properties.getProperty(name)); + } } @Override public String getClientInfo(String name) throws SQLException { - return null; + if (isClosed) + throw new SQLClientInfoException(); + + return clientInfoProps.getProperty(name); } @Override public Properties getClientInfo() throws SQLException { - return null; + if (isClosed) + throw new SQLClientInfoException(); + + return clientInfoProps; } @Override public Array createArrayOf(String typeName, Object[] elements) throws SQLException { - //TODO: not supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public Struct createStruct(String typeName, Object[] attributes) throws SQLException { - //TODO: not supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void setSchema(String schema) throws SQLException { - + if (isClosed()) + throw new SQLException(CONNECTION_IS_CLOSED); + synchronized (RestfulConnection.class) { + this.database = schema; + } } @Override public String getSchema() throws SQLException { - return null; + if (isClosed()) + throw new SQLException(CONNECTION_IS_CLOSED); + return this.database; } @Override public void abort(Executor executor) throws SQLException { + if (executor == null) { + throw new SQLException("Executor can not be null"); + } + executor.execute(() -> { + try { + close(); + } catch (SQLException e) { + e.printStackTrace(); + } + }); } @Override public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException { - + if (isClosed()) + throw new SQLException(CONNECTION_IS_CLOSED); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public int getNetworkTimeout() throws SQLException { + if (isClosed()) + throw new SQLException(CONNECTION_IS_CLOSED); return 0; } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDatabaseMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDatabaseMetaData.java index 2b4d7899fa6dc6fbd45e01aacd14e99b3a6fb536..21d2c6402f820f6548b9fb954f83e14a02065d20 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDatabaseMetaData.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDatabaseMetaData.java @@ -1,307 +1,32 @@ package com.taosdata.jdbc.rs; +import com.taosdata.jdbc.*; + import java.sql.*; +import java.util.ArrayList; +import java.util.List; -public class RestfulDatabaseMetaData implements DatabaseMetaData { +public class RestfulDatabaseMetaData extends AbstractDatabaseMetaData { - @Override - public boolean allProceduresAreCallable() throws SQLException { - return false; - } - @Override - public boolean allTablesAreSelectable() throws SQLException { - return false; + private final String url; + private final String userName; + private final Connection connection; + + public RestfulDatabaseMetaData(String url, String userName, Connection connection) { 
+ this.url = url; + this.userName = userName; + this.connection = connection; } @Override public String getURL() throws SQLException { - return null; + return this.url; } @Override public String getUserName() throws SQLException { - return null; - } - - @Override - public boolean isReadOnly() throws SQLException { - return false; - } - - @Override - public boolean nullsAreSortedHigh() throws SQLException { - return false; - } - - @Override - public boolean nullsAreSortedLow() throws SQLException { - return false; - } - - @Override - public boolean nullsAreSortedAtStart() throws SQLException { - return false; - } - - @Override - public boolean nullsAreSortedAtEnd() throws SQLException { - return false; - } - - @Override - public String getDatabaseProductName() throws SQLException { - return null; - } - - @Override - public String getDatabaseProductVersion() throws SQLException { - return null; - } - - @Override - public String getDriverName() throws SQLException { - return null; - } - - @Override - public String getDriverVersion() throws SQLException { - return null; - } - - @Override - public int getDriverMajorVersion() { - return 0; - } - - @Override - public int getDriverMinorVersion() { - return 0; - } - - @Override - public boolean usesLocalFiles() throws SQLException { - return false; - } - - @Override - public boolean usesLocalFilePerTable() throws SQLException { - return false; - } - - @Override - public boolean supportsMixedCaseIdentifiers() throws SQLException { - return false; - } - - @Override - public boolean storesUpperCaseIdentifiers() throws SQLException { - return false; - } - - @Override - public boolean storesLowerCaseIdentifiers() throws SQLException { - return false; - } - - @Override - public boolean storesMixedCaseIdentifiers() throws SQLException { - return false; - } - - @Override - public boolean supportsMixedCaseQuotedIdentifiers() throws SQLException { - return false; - } - - @Override - public boolean storesUpperCaseQuotedIdentifiers() throws SQLException { - return false; - } - - @Override - public boolean storesLowerCaseQuotedIdentifiers() throws SQLException { - return false; - } - - @Override - public boolean storesMixedCaseQuotedIdentifiers() throws SQLException { - return false; - } - - @Override - public String getIdentifierQuoteString() throws SQLException { - return null; - } - - @Override - public String getSQLKeywords() throws SQLException { - return null; - } - - @Override - public String getNumericFunctions() throws SQLException { - return null; - } - - @Override - public String getStringFunctions() throws SQLException { - return null; - } - - @Override - public String getSystemFunctions() throws SQLException { - return null; - } - - @Override - public String getTimeDateFunctions() throws SQLException { - return null; - } - - @Override - public String getSearchStringEscape() throws SQLException { - return null; - } - - @Override - public String getExtraNameCharacters() throws SQLException { - return null; - } - - @Override - public boolean supportsAlterTableWithAddColumn() throws SQLException { - return false; - } - - @Override - public boolean supportsAlterTableWithDropColumn() throws SQLException { - return false; - } - - @Override - public boolean supportsColumnAliasing() throws SQLException { - return false; - } - - @Override - public boolean nullPlusNonNullIsNull() throws SQLException { - return false; - } - - @Override - public boolean supportsConvert() throws SQLException { - return false; - } - - @Override - public boolean 
supportsConvert(int fromType, int toType) throws SQLException { - return false; - } - - @Override - public boolean supportsTableCorrelationNames() throws SQLException { - return false; - } - - @Override - public boolean supportsDifferentTableCorrelationNames() throws SQLException { - return false; - } - - @Override - public boolean supportsExpressionsInOrderBy() throws SQLException { - return false; - } - - @Override - public boolean supportsOrderByUnrelated() throws SQLException { - return false; - } - - @Override - public boolean supportsGroupBy() throws SQLException { - return false; - } - - @Override - public boolean supportsGroupByUnrelated() throws SQLException { - return false; - } - - @Override - public boolean supportsGroupByBeyondSelect() throws SQLException { - return false; - } - - @Override - public boolean supportsLikeEscapeClause() throws SQLException { - return false; - } - - @Override - public boolean supportsMultipleResultSets() throws SQLException { - return false; - } - - @Override - public boolean supportsMultipleTransactions() throws SQLException { - return false; - } - - @Override - public boolean supportsNonNullableColumns() throws SQLException { - return false; - } - - @Override - public boolean supportsMinimumSQLGrammar() throws SQLException { - return false; - } - - @Override - public boolean supportsCoreSQLGrammar() throws SQLException { - return false; - } - - @Override - public boolean supportsExtendedSQLGrammar() throws SQLException { - return false; - } - - @Override - public boolean supportsANSI92EntryLevelSQL() throws SQLException { - return false; - } - - @Override - public boolean supportsANSI92IntermediateSQL() throws SQLException { - return false; - } - - @Override - public boolean supportsANSI92FullSQL() throws SQLException { - return false; - } - - @Override - public boolean supportsIntegrityEnhancementFacility() throws SQLException { - return false; - } - - @Override - public boolean supportsOuterJoins() throws SQLException { - return false; - } - - @Override - public boolean supportsFullOuterJoins() throws SQLException { - return false; - } - - @Override - public boolean supportsLimitedOuterJoins() throws SQLException { - return false; + return this.userName; } @Override @@ -325,554 +50,149 @@ public class RestfulDatabaseMetaData implements DatabaseMetaData { } @Override - public String getCatalogSeparator() throws SQLException { - return null; - } - - @Override - public boolean supportsSchemasInDataManipulation() throws SQLException { - return false; - } - - @Override - public boolean supportsSchemasInProcedureCalls() throws SQLException { - return false; - } - - @Override - public boolean supportsSchemasInTableDefinitions() throws SQLException { - return false; - } - - @Override - public boolean supportsSchemasInIndexDefinitions() throws SQLException { - return false; + public ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types) throws SQLException { + Statement stmt = null; + if (null != connection && !connection.isClosed()) { + stmt = connection.createStatement(); + if (catalog == null || catalog.length() < 1) { + catalog = connection.getCatalog(); + } + stmt.executeUpdate("use " + catalog); + ResultSet resultSet0 = stmt.executeQuery("show tables"); + GetTablesResultSet getTablesResultSet = new GetTablesResultSet(resultSet0, catalog, schemaPattern, tableNamePattern, types); + return getTablesResultSet; + } else { + throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + 
} } @Override - public boolean supportsSchemasInPrivilegeDefinitions() throws SQLException { - return false; + public ResultSet getCatalogs() throws SQLException { + if (connection != null && !connection.isClosed()) { + Statement stmt = connection.createStatement(); + ResultSet resultSet0 = stmt.executeQuery("show databases"); + CatalogResultSet resultSet = new CatalogResultSet(resultSet0); + return resultSet; + } else { + return new EmptyResultSet(); + } } @Override - public boolean supportsCatalogsInDataManipulation() throws SQLException { + public ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) throws SQLException { + Statement stmt = null; + if (null != connection && !connection.isClosed()) { + stmt = connection.createStatement(); + if (catalog == null || catalog.length() < 1) { + catalog = connection.getCatalog(); + } + stmt.execute("use " + catalog); + + DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet(); + // set up ColumnMetaDataList + List columnMetaDataList = new ArrayList<>(24); + columnMetaDataList.add(null); + columnMetaDataList.add(null); + // add TABLE_NAME + ColumnMetaData colMetaData = new ColumnMetaData(); + colMetaData.setColIndex(3); + colMetaData.setColName("TABLE_NAME"); + colMetaData.setColSize(193); + colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_BINARY); + columnMetaDataList.add(colMetaData); + // add COLUMN_NAME + colMetaData = new ColumnMetaData(); + colMetaData.setColIndex(4); + colMetaData.setColName("COLUMN_NAME"); + colMetaData.setColSize(65); + colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_BINARY); + columnMetaDataList.add(colMetaData); + // add DATA_TYPE + colMetaData = new ColumnMetaData(); + colMetaData.setColIndex(5); + colMetaData.setColName("DATA_TYPE"); + colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_INT); + columnMetaDataList.add(colMetaData); + // add TYPE_NAME + colMetaData = new ColumnMetaData(); + colMetaData.setColIndex(6); + colMetaData.setColName("TYPE_NAME"); + colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_BINARY); + columnMetaDataList.add(colMetaData); + // add COLUMN_SIZE + colMetaData = new ColumnMetaData(); + colMetaData.setColIndex(7); + colMetaData.setColName("COLUMN_SIZE"); + colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_INT); + columnMetaDataList.add(colMetaData); + // add BUFFER_LENGTH ,not used + columnMetaDataList.add(null); + // add DECIMAL_DIGITS + colMetaData = new ColumnMetaData(); + colMetaData.setColIndex(9); + colMetaData.setColName("DECIMAL_DIGITS"); + colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_INT); + columnMetaDataList.add(colMetaData); + // add NUM_PREC_RADIX + colMetaData = new ColumnMetaData(); + colMetaData.setColIndex(10); + colMetaData.setColName("NUM_PREC_RADIX"); + colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_INT); + columnMetaDataList.add(colMetaData); + // add NULLABLE + colMetaData = new ColumnMetaData(); + colMetaData.setColIndex(11); + colMetaData.setColName("NULLABLE"); + colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_INT); + columnMetaDataList.add(colMetaData); + + resultSet.setColumnMetaDataList(columnMetaDataList); + + // set up rowDataList + ResultSet resultSet0 = stmt.executeQuery("describe " + tableNamePattern); + List rowDataList = new ArrayList<>(); + int index = 0; + while (resultSet0.next()) { + TSDBResultSetRowData rowData = new TSDBResultSetRowData(24); + // set TABLE_NAME + rowData.setString(2, tableNamePattern); + // set COLUMN_NAME + rowData.setString(3, 
resultSet0.getString(1)); + // set DATA_TYPE + String typeName = resultSet0.getString(2); + rowData.setInt(4, getDataType(typeName)); + // set TYPE_NAME + rowData.setString(5, typeName); + // set COLUMN_SIZE + int length = resultSet0.getInt(3); + rowData.setInt(6, getColumnSize(typeName, length)); + // set DECIMAL_DIGITS + rowData.setInt(8, getDecimalDigits(typeName)); + // set NUM_PREC_RADIX + rowData.setInt(9, 10); + // set NULLABLE + rowData.setInt(10, getNullable(index, typeName)); + rowDataList.add(rowData); + index++; + } + resultSet.setRowDataList(rowDataList); + + return resultSet; + } else { + throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + } + } + + @Override + public long getMaxLogicalLobSize() throws SQLException { + return 0; + } + + @Override + public boolean supportsRefCursors() throws SQLException { return false; } - @Override - public boolean supportsCatalogsInProcedureCalls() throws SQLException { - return false; - } - - @Override - public boolean supportsCatalogsInTableDefinitions() throws SQLException { - return false; - } - - @Override - public boolean supportsCatalogsInIndexDefinitions() throws SQLException { - return false; - } - - @Override - public boolean supportsCatalogsInPrivilegeDefinitions() throws SQLException { - return false; - } - - @Override - public boolean supportsPositionedDelete() throws SQLException { - return false; - } - - @Override - public boolean supportsPositionedUpdate() throws SQLException { - return false; - } - - @Override - public boolean supportsSelectForUpdate() throws SQLException { - return false; - } - - @Override - public boolean supportsStoredProcedures() throws SQLException { - return false; - } - - @Override - public boolean supportsSubqueriesInComparisons() throws SQLException { - return false; - } - - @Override - public boolean supportsSubqueriesInExists() throws SQLException { - return false; - } - - @Override - public boolean supportsSubqueriesInIns() throws SQLException { - return false; - } - - @Override - public boolean supportsSubqueriesInQuantifieds() throws SQLException { - return false; - } - - @Override - public boolean supportsCorrelatedSubqueries() throws SQLException { - return false; - } - - @Override - public boolean supportsUnion() throws SQLException { - return false; - } - - @Override - public boolean supportsUnionAll() throws SQLException { - return false; - } - - @Override - public boolean supportsOpenCursorsAcrossCommit() throws SQLException { - return false; - } - - @Override - public boolean supportsOpenCursorsAcrossRollback() throws SQLException { - return false; - } - - @Override - public boolean supportsOpenStatementsAcrossCommit() throws SQLException { - return false; - } - - @Override - public boolean supportsOpenStatementsAcrossRollback() throws SQLException { - return false; - } - - @Override - public int getMaxBinaryLiteralLength() throws SQLException { - return 0; - } - - @Override - public int getMaxCharLiteralLength() throws SQLException { - return 0; - } - - @Override - public int getMaxColumnNameLength() throws SQLException { - return 0; - } - - @Override - public int getMaxColumnsInGroupBy() throws SQLException { - return 0; - } - - @Override - public int getMaxColumnsInIndex() throws SQLException { - return 0; - } - - @Override - public int getMaxColumnsInOrderBy() throws SQLException { - return 0; - } - - @Override - public int getMaxColumnsInSelect() throws SQLException { - return 0; - } - - @Override - public int getMaxColumnsInTable() throws 
SQLException { - return 0; - } - - @Override - public int getMaxConnections() throws SQLException { - return 0; - } - - @Override - public int getMaxCursorNameLength() throws SQLException { - return 0; - } - - @Override - public int getMaxIndexLength() throws SQLException { - return 0; - } - - @Override - public int getMaxSchemaNameLength() throws SQLException { - return 0; - } - - @Override - public int getMaxProcedureNameLength() throws SQLException { - return 0; - } - - @Override - public int getMaxCatalogNameLength() throws SQLException { - return 0; - } - - @Override - public int getMaxRowSize() throws SQLException { - return 0; - } - - @Override - public boolean doesMaxRowSizeIncludeBlobs() throws SQLException { - return false; - } - - @Override - public int getMaxStatementLength() throws SQLException { - return 0; - } - - @Override - public int getMaxStatements() throws SQLException { - return 0; - } - - @Override - public int getMaxTableNameLength() throws SQLException { - return 0; - } - - @Override - public int getMaxTablesInSelect() throws SQLException { - return 0; - } - - @Override - public int getMaxUserNameLength() throws SQLException { - return 0; - } - - @Override - public int getDefaultTransactionIsolation() throws SQLException { - return 0; - } - - @Override - public boolean supportsTransactions() throws SQLException { - return false; - } - - @Override - public boolean supportsTransactionIsolationLevel(int level) throws SQLException { - return false; - } - - @Override - public boolean supportsDataDefinitionAndDataManipulationTransactions() throws SQLException { - return false; - } - - @Override - public boolean supportsDataManipulationTransactionsOnly() throws SQLException { - return false; - } - - @Override - public boolean dataDefinitionCausesTransactionCommit() throws SQLException { - return false; - } - - @Override - public boolean dataDefinitionIgnoredInTransactions() throws SQLException { - return false; - } - - @Override - public ResultSet getProcedures(String catalog, String schemaPattern, String procedureNamePattern) throws SQLException { - return null; - } - - @Override - public ResultSet getProcedureColumns(String catalog, String schemaPattern, String procedureNamePattern, String columnNamePattern) throws SQLException { - return null; - } - - @Override - public ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types) throws SQLException { - return null; - } - - @Override - public ResultSet getSchemas() throws SQLException { - return null; - } - - @Override - public ResultSet getCatalogs() throws SQLException { - return null; - } - - @Override - public ResultSet getTableTypes() throws SQLException { - return null; - } - - @Override - public ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) throws SQLException { - return null; - } - - @Override - public ResultSet getColumnPrivileges(String catalog, String schema, String table, String columnNamePattern) throws SQLException { - return null; - } - - @Override - public ResultSet getTablePrivileges(String catalog, String schemaPattern, String tableNamePattern) throws SQLException { - return null; - } - - @Override - public ResultSet getBestRowIdentifier(String catalog, String schema, String table, int scope, boolean nullable) throws SQLException { - return null; - } - - @Override - public ResultSet getVersionColumns(String catalog, String schema, String table) throws SQLException { - return null; - } - - @Override - 
public ResultSet getPrimaryKeys(String catalog, String schema, String table) throws SQLException { - return null; - } - - @Override - public ResultSet getImportedKeys(String catalog, String schema, String table) throws SQLException { - return null; - } - - @Override - public ResultSet getExportedKeys(String catalog, String schema, String table) throws SQLException { - return null; - } - - @Override - public ResultSet getCrossReference(String parentCatalog, String parentSchema, String parentTable, String foreignCatalog, String foreignSchema, String foreignTable) throws SQLException { - return null; - } - - @Override - public ResultSet getTypeInfo() throws SQLException { - return null; - } - - @Override - public ResultSet getIndexInfo(String catalog, String schema, String table, boolean unique, boolean approximate) throws SQLException { - return null; - } - - @Override - public boolean supportsResultSetType(int type) throws SQLException { - return false; - } - - @Override - public boolean supportsResultSetConcurrency(int type, int concurrency) throws SQLException { - return false; - } - - @Override - public boolean ownUpdatesAreVisible(int type) throws SQLException { - return false; - } - - @Override - public boolean ownDeletesAreVisible(int type) throws SQLException { - return false; - } - - @Override - public boolean ownInsertsAreVisible(int type) throws SQLException { - return false; - } - - @Override - public boolean othersUpdatesAreVisible(int type) throws SQLException { - return false; - } - - @Override - public boolean othersDeletesAreVisible(int type) throws SQLException { - return false; - } - - @Override - public boolean othersInsertsAreVisible(int type) throws SQLException { - return false; - } - - @Override - public boolean updatesAreDetected(int type) throws SQLException { - return false; - } - - @Override - public boolean deletesAreDetected(int type) throws SQLException { - return false; - } - - @Override - public boolean insertsAreDetected(int type) throws SQLException { - return false; - } - - @Override - public boolean supportsBatchUpdates() throws SQLException { - return false; - } - - @Override - public ResultSet getUDTs(String catalog, String schemaPattern, String typeNamePattern, int[] types) throws SQLException { - return null; - } - - @Override - public Connection getConnection() throws SQLException { - return null; - } - - @Override - public boolean supportsSavepoints() throws SQLException { - return false; - } - - @Override - public boolean supportsNamedParameters() throws SQLException { - return false; - } - - @Override - public boolean supportsMultipleOpenResults() throws SQLException { - return false; - } - - @Override - public boolean supportsGetGeneratedKeys() throws SQLException { - return false; - } - - @Override - public ResultSet getSuperTypes(String catalog, String schemaPattern, String typeNamePattern) throws SQLException { - return null; - } - - @Override - public ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern) throws SQLException { - return null; - } - - @Override - public ResultSet getAttributes(String catalog, String schemaPattern, String typeNamePattern, String attributeNamePattern) throws SQLException { - return null; - } - - @Override - public boolean supportsResultSetHoldability(int holdability) throws SQLException { - return false; - } - - @Override - public int getResultSetHoldability() throws SQLException { - return 0; - } - - @Override - public int getDatabaseMajorVersion() throws SQLException { - return 
0; - } - - @Override - public int getDatabaseMinorVersion() throws SQLException { - return 0; - } - - @Override - public int getJDBCMajorVersion() throws SQLException { - return 0; - } - - @Override - public int getJDBCMinorVersion() throws SQLException { - return 0; - } - - @Override - public int getSQLStateType() throws SQLException { - return 0; - } - - @Override - public boolean locatorsUpdateCopy() throws SQLException { - return false; - } - - @Override - public boolean supportsStatementPooling() throws SQLException { - return false; - } - - @Override - public RowIdLifetime getRowIdLifetime() throws SQLException { - return null; - } - - @Override - public ResultSet getSchemas(String catalog, String schemaPattern) throws SQLException { - return null; - } - - @Override - public boolean supportsStoredFunctionsUsingCallSyntax() throws SQLException { - return false; - } - - @Override - public boolean autoCommitFailureClosesAllResultSets() throws SQLException { - return false; - } - - @Override - public ResultSet getClientInfoProperties() throws SQLException { - return null; - } - - @Override - public ResultSet getFunctions(String catalog, String schemaPattern, String functionNamePattern) throws SQLException { - return null; - } - - @Override - public ResultSet getFunctionColumns(String catalog, String schemaPattern, String functionNamePattern, String columnNamePattern) throws SQLException { - return null; - } - - @Override - public ResultSet getPseudoColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) throws SQLException { - return null; - } - - @Override - public boolean generatedKeyAlwaysReturned() throws SQLException { - return false; - } @Override public T unwrap(Class iface) throws SQLException { diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java index 9e87cfa68015ece9fdb0208b290daaecf75b2191..cb6ff369f2b43f7cfae63fcc57701d692d3d803b 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java @@ -33,7 +33,7 @@ public class RestfulDriver extends AbstractTaosDriver { return null; Properties props = parseURL(url, info); - String host = props.getProperty(TSDBDriver.PROPERTY_KEY_HOST, "localhost"); + String host = props.getProperty(TSDBDriver.PROPERTY_KEY_HOST); String port = props.getProperty(TSDBDriver.PROPERTY_KEY_PORT, "6041"); String database = props.containsKey(TSDBDriver.PROPERTY_KEY_DBNAME) ? 
props.getProperty(TSDBDriver.PROPERTY_KEY_DBNAME) : null; diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java index c536ae4a8957519cdcb8d64f95fbdf2934624fff..1aa3d5b3cefe2524f0246b500af6687a79d6b20c 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java @@ -1,7 +1,8 @@ package com.taosdata.jdbc.rs; +import com.alibaba.fastjson.JSONArray; +import com.alibaba.fastjson.JSONObject; import com.taosdata.jdbc.TSDBConstants; -import org.apache.commons.lang3.StringUtils; import java.io.InputStream; import java.io.Reader; @@ -9,39 +10,111 @@ import java.math.BigDecimal; import java.net.URL; import java.sql.*; import java.util.ArrayList; -import java.util.Arrays; import java.util.Calendar; +import java.util.List; import java.util.Map; public class RestfulResultSet implements ResultSet { - private boolean isClosed = false; + private static final String RESULT_SET_IS_CLOSED = "resultSet is closed."; + private volatile boolean isClosed; private int pos = -1; - private ArrayList<ArrayList<String>> data; - private ArrayList<String> fields; - - public RestfulResultSet(String str, String fieldData) { - data = new ArrayList<>(); - str = str.substring(2, str.length() - 2); - ArrayList<String> strTemp = new ArrayList<>(Arrays.asList(str.split("],\\["))); - for (String s : strTemp) { - ArrayList<String> curr = new ArrayList<>(Arrays.asList(s.split(","))); - data.add(curr); + + private final String database; + private final Statement statement; + // data + private ArrayList<ArrayList<Object>> resultSet; + // meta + private ArrayList<String> columnNames; + private ArrayList<Field> columns; + private RestfulResultSetMetaData metaData; + + /** + * Builds a result set from a single result JSON object. Used for statements such as "show databases" or "show tables" that return rows but no retrievable column meta, so every value is treated as a String. + ***/ + public RestfulResultSet(String database, Statement statement, JSONObject resultJson) { + this.database = database; + this.statement = statement; + // row data + JSONArray data = resultJson.getJSONArray("data"); + resultSet = new ArrayList<>(); + int columnIndex = 0; + for (; columnIndex < data.size(); columnIndex++) { + ArrayList<Object> oneRow = new ArrayList<>(); + JSONArray one = data.getJSONArray(columnIndex); + for (int j = 0; j < one.size(); j++) { + oneRow.add(one.getString(j)); + } + resultSet.add(oneRow); + } + + // column names only + columnNames = new ArrayList<>(); + columns = new ArrayList<>(); + JSONArray head = resultJson.getJSONArray("head"); + for (int i = 0; i < head.size(); i++) { + String name = head.getString(i); + columnNames.add(name); + columns.add(new Field(name, "", 0, "")); } - if (!StringUtils.isBlank(fieldData)) { - fields = new ArrayList<>(); - fieldData = fieldData.substring(2, fieldData.length() - 2); - ArrayList<String> fieldTemp = new ArrayList<>(Arrays.asList(fieldData.split("],\\["))); - for (String s : fieldTemp) { - String curr = Arrays.asList(s.split(",")).get(0); - fields.add(curr.substring(1, curr.length() - 1)); // strip the surrounding double quotes + this.metaData = new RestfulResultSetMetaData(this.database, columns); + } + + /** + * Builds a result set from several JSON result objects. + * + * @param resultJson: the result set carrying the "data" section, i.e. the rows returned by the sql statement + * @param fieldJson: the result sets (at most 2) carrying the column meta, i.e. the output of "describe xxx" + **/ + public RestfulResultSet(String database, Statement statement, JSONObject resultJson, List<JSONObject> fieldJson) { + this(database, statement, resultJson); + ArrayList<Field> newColumns = new ArrayList<>(); + + for (Field column : columns) { + Field field =
findField(column.name, fieldJson); + if (field != null) { + newColumns.add(field); + } else { + newColumns.add(column); } } + this.columns = newColumns; + this.metaData = new RestfulResultSetMetaData(this.database, this.columns); + } + + public Field findField(String columnName, List fieldJsonList) { + for (JSONObject fieldJSON : fieldJsonList) { + JSONArray fieldDataJson = fieldJSON.getJSONArray("data"); + for (int i = 0; i < fieldDataJson.size(); i++) { + JSONArray field = fieldDataJson.getJSONArray(i); + if (columnName.equalsIgnoreCase(field.getString(0))) { + return new Field(field.getString(0), field.getString(1), field.getInteger(2), field.getString(3)); + } + } + } + + return null; + } + + public class Field { + String name; + String type; + int length; + String note; + + public Field(String name, String type, int length, String note) { + this.name = name; + this.type = type; + this.length = length; + this.note = note; + } } @Override public boolean next() throws SQLException { - if (isClosed) throw new SQLException(TSDBConstants.WrapErrMsg("Result is Closed!!!")); - if (pos < data.size() - 1) { + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + + if (pos < resultSet.size() - 1) { pos++; return true; } @@ -50,24 +123,34 @@ public class RestfulResultSet implements ResultSet { @Override public void close() throws SQLException { - this.isClosed = true; + synchronized (RestfulResultSet.class) { + this.isClosed = true; + } } @Override public boolean wasNull() throws SQLException { - return data.isEmpty(); + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + return resultSet.isEmpty(); } @Override public String getString(int columnIndex) throws SQLException { - if (columnIndex > data.get(pos).size()) { - throw new SQLException(TSDBConstants.WrapErrMsg("Column Index out of range, " + columnIndex + " > " + data.get(pos).size())); + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + + if (columnIndex > resultSet.get(pos).size()) { + throw new SQLException(TSDBConstants.WrapErrMsg("Column Index out of range, " + columnIndex + " > " + resultSet.get(pos).size())); } - return data.get(pos).get(columnIndex - 1); + return resultSet.get(pos).get(columnIndex - 1).toString(); } @Override public boolean getBoolean(int columnIndex) throws SQLException { + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + String result = getString(columnIndex); if (!(result.equals("true") || result.equals("false"))) { throw new SQLException("not boolean value"); @@ -77,65 +160,90 @@ public class RestfulResultSet implements ResultSet { @Override public byte getByte(int columnIndex) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public short getShort(int columnIndex) throws SQLException { + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + return Short.parseShort(getString(columnIndex)); } @Override public int getInt(int columnIndex) throws SQLException { - String result = getString(columnIndex); - return Integer.parseInt(result); + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + return 
Integer.parseInt(getString(columnIndex)); } @Override public long getLong(int columnIndex) throws SQLException { - String result = getString(columnIndex); - return Long.parseLong(result); + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + + return Long.parseLong(getString(columnIndex)); } @Override public float getFloat(int columnIndex) throws SQLException { - String result = getString(columnIndex); - return Float.parseFloat(result); + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + + return Float.parseFloat(getString(columnIndex)); } @Override public double getDouble(int columnIndex) throws SQLException { - String result = getString(columnIndex); - return Double.parseDouble(result); + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + + return Double.parseDouble(getString(columnIndex)); } + /*******************************************************************************************************************/ + @Override public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public byte[] getBytes(int columnIndex) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public Date getDate(int columnIndex) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public Time getTime(int columnIndex) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public Timestamp getTimestamp(int columnIndex) throws SQLException { + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + String strDate = getString(columnIndex); strDate = strDate.substring(1, strDate.length() - 1); return Timestamp.valueOf(strDate); @@ -143,1038 +251,984 @@ public class RestfulResultSet implements ResultSet { @Override public InputStream getAsciiStream(int columnIndex) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public InputStream getUnicodeStream(int columnIndex) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public InputStream 
getBinaryStream(int columnIndex) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public String getString(String columnLabel) throws SQLException { + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + return getString(findColumn(columnLabel) + 1); } @Override public boolean getBoolean(String columnLabel) throws SQLException { - return Boolean.parseBoolean(getString(columnLabel)); + return getBoolean(findColumn(columnLabel)); } @Override public byte getByte(String columnLabel) throws SQLException { - return 0; + return getByte(findColumn(columnLabel)); } @Override public short getShort(String columnLabel) throws SQLException { - return Short.parseShort(getString(columnLabel)); + return getShort(findColumn(columnLabel)); } @Override public int getInt(String columnLabel) throws SQLException { - return Integer.parseInt(getString(columnLabel)); + return getInt(findColumn(columnLabel)); } @Override public long getLong(String columnLabel) throws SQLException { - return Long.parseLong(getString(columnLabel)); + return getLong(findColumn(columnLabel)); } @Override public float getFloat(String columnLabel) throws SQLException { - String result = getString(columnLabel); - return Float.parseFloat(result); + return getFloat(findColumn(columnLabel)); } @Override public double getDouble(String columnLabel) throws SQLException { - return Double.parseDouble(getString(columnLabel)); + return getDouble(findColumn(columnLabel)); } @Override public BigDecimal getBigDecimal(String columnLabel, int scale) throws SQLException { - return null; + return getBigDecimal(findColumn(columnLabel)); } @Override public byte[] getBytes(String columnLabel) throws SQLException { - return new byte[0]; + return getBytes(findColumn(columnLabel)); } @Override public Date getDate(String columnLabel) throws SQLException { - return null; + return getDate(findColumn(columnLabel)); } @Override public Time getTime(String columnLabel) throws SQLException { - return null; + return getTime(findColumn(columnLabel)); } @Override public Timestamp getTimestamp(String columnLabel) throws SQLException { - return Timestamp.valueOf(getString(columnLabel)); + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + return Timestamp.valueOf(getString(findColumn(columnLabel))); } @Override public InputStream getAsciiStream(String columnLabel) throws SQLException { - return null; + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public InputStream getUnicodeStream(String columnLabel) throws SQLException { - return null; + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public InputStream getBinaryStream(String columnLabel) throws SQLException { - return null; + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public SQLWarning getWarnings() throws SQLException { + if (isClosed()) + throw new 
SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); return null; - //TODO: SQLFeature Not Supported -// throw new SQLFeatureNotSupportedException(); } @Override public void clearWarnings() throws SQLException { + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); return; - //TODO: SQLFeature Not Supported -// throw new SQLFeatureNotSupportedException(); } @Override public String getCursorName() throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public ResultSetMetaData getMetaData() throws SQLException { - return new RestfulResultSetMetaData(fields); + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + + return this.metaData; } @Override public Object getObject(int columnIndex) throws SQLException { -// return null; - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public Object getObject(String columnLabel) throws SQLException { -// return null; - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + return getObject(findColumn(columnLabel)); } @Override public int findColumn(String columnLabel) throws SQLException { - return fields.indexOf(columnLabel); + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + + return columnNames.indexOf(columnLabel); } @Override public Reader getCharacterStream(int columnIndex) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public Reader getCharacterStream(String columnLabel) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public BigDecimal getBigDecimal(int columnIndex) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public BigDecimal getBigDecimal(String columnLabel) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public boolean isBeforeFirst() throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } 
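The RestfulResultSet rework above keeps the cursor strictly forward-only and read-only, surfaces values mainly through getString and the parse-based numeric getters, and resolves column labels via findColumn. A minimal usage sketch over plain JDBC follows; the jdbc:TAOS-RS:// URL, host, database, and table names are illustrative assumptions, not taken from this patch.

```java
import java.sql.*;

public class RestfulQueryExample {
    public static void main(String[] args) throws Exception {
        // Assumed: the RESTful driver class from this patch and a TAOS-RS style URL;
        // adjust host, port (6041 is the default used by RestfulDriver) and credentials.
        Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
        String url = "jdbc:TAOS-RS://localhost:6041/test?user=root&password=taosdata";

        try (Connection conn = DriverManager.getConnection(url);
             // Only TYPE_FORWARD_ONLY / CONCUR_READ_ONLY statements are accepted by RestfulConnection.
             Statement stmt = conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
             ResultSet rs = stmt.executeQuery("select ts, current from meters limit 10")) {

            ResultSetMetaData meta = rs.getMetaData();    // backed by RestfulResultSetMetaData
            for (int i = 1; i <= meta.getColumnCount(); i++) {
                System.out.print(meta.getColumnLabel(i) + "\t");
            }
            System.out.println();

            while (rs.next()) {                           // cursor starts before the first row (pos == -1)
                Timestamp ts = rs.getTimestamp(1);        // column indexes are 1-based
                String current = rs.getString("current"); // label lookup goes through findColumn()
                System.out.println(ts + "\t" + current);
            }
        }
    }
}
```

Anything beyond this pattern, such as scrollable cursors, row updates, or stream getters, surfaces as SQLFeatureNotSupportedException per the methods shown in this patch.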
@Override public boolean isAfterLast() throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public boolean isFirst() throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public boolean isLast() throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void beforeFirst() throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void afterLast() throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public boolean first() throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public boolean last() throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public int getRow() throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public boolean absolute(int row) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public boolean relative(int rows) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public boolean previous() throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + + throw new 
SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void setFetchDirection(int direction) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + if (direction != ResultSet.FETCH_FORWARD && direction != ResultSet.FETCH_REVERSE && direction != ResultSet.FETCH_UNKNOWN) + throw new SQLException(TSDBConstants.INVALID_VARIABLES); + + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public int getFetchDirection() throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + + return ResultSet.FETCH_FORWARD; } @Override public void setFetchSize(int rows) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + if (rows < 0) + throw new SQLException(TSDBConstants.INVALID_VARIABLES); + + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public int getFetchSize() throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + + return this.resultSet.size(); } @Override public int getType() throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + return ResultSet.TYPE_FORWARD_ONLY; } @Override public int getConcurrency() throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + return ResultSet.CONCUR_READ_ONLY; } @Override public boolean rowUpdated() throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public boolean rowInserted() throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public boolean rowDeleted() throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateNull(int columnIndex) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateBoolean(int columnIndex, boolean x) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateByte(int columnIndex, byte x) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateShort(int columnIndex, short x) throws SQLException { - //TODO: SQLFeature Not Supported - throw new
SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateInt(int columnIndex, int x) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateLong(int columnIndex, long x) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateFloat(int columnIndex, float x) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateDouble(int columnIndex, double x) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateBigDecimal(int columnIndex, BigDecimal x) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateString(int columnIndex, String x) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateBytes(int columnIndex, byte[] x) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateDate(int columnIndex, Date x) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateTime(int columnIndex, Time x) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateTimestamp(int columnIndex, Timestamp x) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateAsciiStream(int columnIndex, InputStream x, int length) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateBinaryStream(int columnIndex, InputStream x, int length) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateCharacterStream(int columnIndex, Reader x, int length) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new 
SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateObject(int columnIndex, Object x, int scaleOrLength) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateObject(int columnIndex, Object x) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateNull(String columnLabel) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateBoolean(String columnLabel, boolean x) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateByte(String columnLabel, byte x) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateShort(String columnLabel, short x) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateInt(String columnLabel, int x) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateLong(String columnLabel, long x) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateFloat(String columnLabel, float x) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateDouble(String columnLabel, double x) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateBigDecimal(String columnLabel, BigDecimal x) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateString(String columnLabel, String x) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateBytes(String columnLabel, byte[] x) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateDate(String columnLabel, Date x) 
throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateTime(String columnLabel, Time x) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateTimestamp(String columnLabel, Timestamp x) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateAsciiStream(String columnLabel, InputStream x, int length) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateBinaryStream(String columnLabel, InputStream x, int length) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateCharacterStream(String columnLabel, Reader reader, int length) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateObject(String columnLabel, Object x, int scaleOrLength) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateObject(String columnLabel, Object x) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void insertRow() throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateRow() throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void deleteRow() throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void refreshRow() throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void cancelRowUpdates() throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void moveToInsertRow() throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public 
void moveToCurrentRow() throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public Statement getStatement() throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + if (isClosed()) + throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED)); + + return this.statement; } @Override public Object getObject(int columnIndex, Map> map) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public Ref getRef(int columnIndex) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public Blob getBlob(int columnIndex) throws SQLException { - return null; + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public Clob getClob(int columnIndex) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public Array getArray(int columnIndex) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } + /******************************************************************************************************************/ @Override public Object getObject(String columnLabel, Map> map) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public Ref getRef(String columnLabel) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public Blob getBlob(String columnLabel) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public Clob getClob(String columnLabel) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public Array getArray(String columnLabel) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public Date getDate(int columnIndex, Calendar cal) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public Date getDate(String columnLabel, Calendar cal) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } 
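The hunks above replace the bare SQLFeatureNotSupportedException thrown by the unsupported result-set mutators with one carrying TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG, and getStatement() now returns the statement that produced the result set (after an isClosed() guard) instead of throwing. Below is a minimal caller-side sketch of the intended behaviour, shown against the RESTful driver; the host, database and weather table are assumptions, not part of the patch.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLFeatureNotSupportedException;
import java.sql.Statement;

public class ReadOnlyResultSetDemo {
    public static void main(String[] args) throws Exception {
        Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
        try (Connection conn = DriverManager.getConnection(
                "jdbc:TAOS-RS://localhost:6041/restful_test?user=root&password=taosdata");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("select * from restful_test.weather")) {
            // getStatement() no longer throws; it hands back the statement that created rs.
            System.out.println(rs.getStatement() == stmt);
            while (rs.next()) {
                System.out.println(rs.getString(1));
            }
            // Result sets stay read-only: update* methods fail with a descriptive message.
            try {
                rs.updateString(1, "rejected");
            } catch (SQLFeatureNotSupportedException e) {
                System.out.println("not supported: " + e.getMessage());
            }
        }
    }
}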
@Override public Time getTime(int columnIndex, Calendar cal) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public Time getTime(String columnLabel, Calendar cal) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public Timestamp getTimestamp(int columnIndex, Calendar cal) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public Timestamp getTimestamp(String columnLabel, Calendar cal) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public URL getURL(int columnIndex) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public URL getURL(String columnLabel) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateRef(int columnIndex, Ref x) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateRef(String columnLabel, Ref x) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateBlob(int columnIndex, Blob x) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateBlob(String columnLabel, Blob x) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateClob(int columnIndex, Clob x) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateClob(String columnLabel, Clob x) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateArray(int columnIndex, Array x) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateArray(String columnLabel, Array x) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new 
SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public RowId getRowId(int columnIndex) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public RowId getRowId(String columnLabel) throws SQLException { - return null; + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateRowId(int columnIndex, RowId x) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateRowId(String columnLabel, RowId x) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public int getHoldability() throws SQLException { -// return 0; - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + return ResultSet.HOLD_CURSORS_OVER_COMMIT; } @Override public boolean isClosed() throws SQLException { return false; //TODO: SQLFeature Not Supported -// throw new SQLFeatureNotSupportedException(); +// throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateNString(int columnIndex, String nString) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateNString(String columnLabel, String nString) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateNClob(int columnIndex, NClob nClob) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateNClob(String columnLabel, NClob nClob) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public NClob getNClob(int columnIndex) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public NClob getNClob(String columnLabel) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public SQLXML getSQLXML(int columnIndex) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public SQLXML getSQLXML(String columnLabel) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } 
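With this hunk getHoldability() reports ResultSet.HOLD_CURSORS_OVER_COMMIT instead of throwing, while isClosed() still hard-codes false (the commented-out throw is left as a reminder). The following is a small sketch of how portable JDBC code might consult those capability methods before relying on them; the helper class name is invented for illustration.

import java.sql.ResultSet;
import java.sql.SQLException;

public final class ResultSetCapabilities {
    private ResultSetCapabilities() {
    }

    // True when the driver reports that open cursors survive a commit.
    public static boolean holdsCursorsOverCommit(ResultSet rs) throws SQLException {
        return rs.getHoldability() == ResultSet.HOLD_CURSORS_OVER_COMMIT;
    }

    // Guard helper: isClosed() currently always returns false for this driver,
    // so callers should not rely on it as their only liveness check.
    public static void ensureOpen(ResultSet rs) throws SQLException {
        if (rs.isClosed()) {
            throw new SQLException("result set is closed");
        }
    }
}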
@Override public void updateSQLXML(int columnIndex, SQLXML xmlObject) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateSQLXML(String columnLabel, SQLXML xmlObject) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public String getNString(int columnIndex) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public String getNString(String columnLabel) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public Reader getNCharacterStream(int columnIndex) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public Reader getNCharacterStream(String columnLabel) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateNCharacterStream(int columnIndex, Reader x, long length) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateNCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateAsciiStream(int columnIndex, InputStream x, long length) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateBinaryStream(int columnIndex, InputStream x, long length) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateCharacterStream(int columnIndex, Reader x, long length) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateAsciiStream(String columnLabel, InputStream x, long length) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateBinaryStream(String columnLabel, InputStream x, long length) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new 
SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateBlob(int columnIndex, InputStream inputStream, long length) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateBlob(String columnLabel, InputStream inputStream, long length) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateClob(int columnIndex, Reader reader, long length) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateClob(String columnLabel, Reader reader, long length) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateNClob(int columnIndex, Reader reader, long length) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateNClob(String columnLabel, Reader reader, long length) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateNCharacterStream(int columnIndex, Reader x) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateNCharacterStream(String columnLabel, Reader reader) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateAsciiStream(int columnIndex, InputStream x) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateBinaryStream(int columnIndex, InputStream x) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateCharacterStream(int columnIndex, Reader x) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateAsciiStream(String columnLabel, InputStream x) throws SQLException { - //TODO: SQLFeature Not Supported - 
throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateBinaryStream(String columnLabel, InputStream x) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateCharacterStream(String columnLabel, Reader reader) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateBlob(int columnIndex, InputStream inputStream) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateBlob(String columnLabel, InputStream inputStream) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateClob(int columnIndex, Reader reader) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateClob(String columnLabel, Reader reader) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateNClob(int columnIndex, Reader reader) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public void updateNClob(String columnLabel, Reader reader) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public T getObject(int columnIndex, Class type) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public T getObject(String columnLabel, Class type) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public T unwrap(Class iface) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public boolean isWrapperFor(Class iface) throws SQLException { - //TODO: SQLFeature Not Supported - throw new SQLFeatureNotSupportedException(); + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSetMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSetMetaData.java index 
5dd61391bcf0f973726c7954e330bb5054c4e91f..1af3088b172f751ef40be5dd92b205949bc009d6 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSetMetaData.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSetMetaData.java @@ -2,13 +2,15 @@ package com.taosdata.jdbc.rs; import java.sql.ResultSetMetaData; import java.sql.SQLException; -import java.util.List; +import java.util.ArrayList; public class RestfulResultSetMetaData implements ResultSetMetaData { - private List fields; + private final String database; + private ArrayList fields; - public RestfulResultSetMetaData(List fields) { + public RestfulResultSetMetaData(String database, ArrayList fields) { + this.database = database; this.fields = fields; } @@ -24,6 +26,7 @@ public class RestfulResultSetMetaData implements ResultSetMetaData { @Override public boolean isCaseSensitive(int column) throws SQLException { + //TODO return false; } @@ -39,7 +42,7 @@ public class RestfulResultSetMetaData implements ResultSetMetaData { @Override public int isNullable(int column) throws SQLException { - return 0; + return ResultSetMetaData.columnNullable; } @Override @@ -54,7 +57,7 @@ public class RestfulResultSetMetaData implements ResultSetMetaData { @Override public String getColumnLabel(int column) throws SQLException { - return fields.get(column - 1); + return fields.get(column - 1).name; } @Override @@ -64,7 +67,7 @@ public class RestfulResultSetMetaData implements ResultSetMetaData { @Override public String getSchemaName(int column) throws SQLException { - return null; + return this.database; } @Override @@ -84,7 +87,7 @@ public class RestfulResultSetMetaData implements ResultSetMetaData { @Override public String getCatalogName(int column) throws SQLException { - return null; + return this.database; } @Override diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java index 30b56638d880137b7a3394e5282a3edf5178d2b0..8b2276fbb055cd18c86173f387fb7c93e296fe43 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java @@ -7,20 +7,60 @@ import com.taosdata.jdbc.rs.util.HttpClientPoolUtil; import com.taosdata.jdbc.utils.SqlSyntaxValidator; import java.sql.*; +import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.stream.Collectors; public class RestfulStatement implements Statement { + private static final String STATEMENT_CLOSED = "Statement already closed."; private boolean closed; private String database; private final RestfulConnection conn; - public RestfulStatement(RestfulConnection c, String database) { - this.conn = c; + private volatile RestfulResultSet resultSet; + private volatile int affectedRows; + private volatile boolean closeOnCompletion; + + public RestfulStatement(RestfulConnection conn, String database) { + this.conn = conn; this.database = database; } + private String[] parseTableIdentifier(String sql) { + sql = sql.trim().toLowerCase(); + String[] ret = null; + if (sql.contains("where")) + sql = sql.substring(0, sql.indexOf("where")); + if (sql.contains("interval")) + sql = sql.substring(0, sql.indexOf("interval")); + if (sql.contains("fill")) + sql = sql.substring(0, sql.indexOf("fill")); + if (sql.contains("sliding")) + sql = sql.substring(0, sql.indexOf("sliding")); + if (sql.contains("group by")) + sql = sql.substring(0, 
sql.indexOf("group by")); + if (sql.contains("order by")) + sql = sql.substring(0, sql.indexOf("order by")); + if (sql.contains("slimit")) + sql = sql.substring(0, sql.indexOf("slimit")); + if (sql.contains("limit")) + sql = sql.substring(0, sql.indexOf("limit")); + // parse + if (sql.contains("from")) { + sql = sql.substring(sql.indexOf("from") + 4).trim(); + return Arrays.asList(sql.split(",")).stream() + .map(tableIdentifier -> { + tableIdentifier = tableIdentifier.trim(); + if (tableIdentifier.contains(" ")) + tableIdentifier = tableIdentifier.substring(0, tableIdentifier.indexOf(" ")); + return tableIdentifier; + }).collect(Collectors.joining(",")).split(","); + } + return ret; + } + @Override public ResultSet executeQuery(String sql) throws SQLException { if (isClosed()) @@ -29,43 +69,33 @@ public class RestfulStatement implements Statement { throw new SQLException("not a select sql for executeQuery: " + sql); final String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql"; + // row data String result = HttpClientPoolUtil.execute(url, sql); - String fields = ""; - List words = Arrays.asList(sql.split(" ")); - if (words.get(0).equalsIgnoreCase("select")) { - int index = 0; - if (words.contains("from")) { - index = words.indexOf("from"); - } - if (words.contains("FROM")) { - index = words.indexOf("FROM"); - } - fields = HttpClientPoolUtil.execute(url, "DESCRIBE " + words.get(index + 1)); - } - - JSONObject jsonObject = JSON.parseObject(result); - if (jsonObject.getString("status").equals("error")) { - throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " + - jsonObject.getString("desc") + "\n" + - "error code: " + jsonObject.getString("code"))); - } - String dataStr = jsonObject.getString("data"); - if ("use".equalsIgnoreCase(fields.split(" ")[0])) { - return new RestfulResultSet(dataStr, ""); + JSONObject resultJson = JSON.parseObject(result); + if (resultJson.getString("status").equals("error")) { + throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " + resultJson.getString("desc") + "\n" + "error code: " + resultJson.getString("code"))); } - JSONObject jsonField = JSON.parseObject(fields); - if (jsonField == null) { - return new RestfulResultSet(dataStr, ""); - } - if (jsonField.getString("status").equals("error")) { - throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " + - jsonField.getString("desc") + "\n" + - "error code: " + jsonField.getString("code"))); + // parse table name from sql + String[] tableIdentifiers = parseTableIdentifier(sql); + if (tableIdentifiers != null) { + List fieldJsonList = new ArrayList<>(); + for (String tableIdentifier : tableIdentifiers) { + // field meta + String fields = HttpClientPoolUtil.execute(url, "DESCRIBE " + tableIdentifier); + JSONObject fieldJson = JSON.parseObject(fields); + if (fieldJson.getString("status").equals("error")) { + throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " + fieldJson.getString("desc") + "\n" + "error code: " + fieldJson.getString("code"))); + } + fieldJsonList.add(fieldJson); + } + this.resultSet = new RestfulResultSet(database, this, resultJson, fieldJsonList); + } else { + this.resultSet = new RestfulResultSet(database, this, resultJson); } - String fieldData = jsonField.getString("data"); - return new RestfulResultSet(dataStr, fieldData); + this.affectedRows = 0; + return resultSet; } @Override @@ -78,77 +108,103 @@ public class RestfulStatement implements Statement { if (this.database == null) throw new 
SQLException("Database not specified or available"); - final String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql"; - HttpClientPoolUtil.execute(url, "use " + conn.getDatabase()); + final String url = "http://" + conn.getHost().trim() + ":" + conn.getPort() + "/rest/sql"; +// HttpClientPoolUtil.execute(url, "use " + conn.getDatabase()); String result = HttpClientPoolUtil.execute(url, sql); JSONObject jsonObject = JSON.parseObject(result); if (jsonObject.getString("status").equals("error")) { - throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " + - jsonObject.getString("desc") + "\n" + - "error code: " + jsonObject.getString("code"))); + throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " + jsonObject.getString("desc") + "\n" + "error code: " + jsonObject.getString("code"))); } - return Integer.parseInt(jsonObject.getString("rows")); + this.resultSet = null; + this.affectedRows = Integer.parseInt(jsonObject.getString("rows")); + return this.affectedRows; } @Override public void close() throws SQLException { - this.closed = true; + synchronized (RestfulStatement.class) { + if (!isClosed()) + this.closed = true; + } } @Override public int getMaxFieldSize() throws SQLException { - return 0; + if (isClosed()) + throw new SQLException(STATEMENT_CLOSED); + return TSDBConstants.maxFieldSize; } @Override public void setMaxFieldSize(int max) throws SQLException { - + if (isClosed()) + throw new SQLException(STATEMENT_CLOSED); + if (max < 0) + throw new SQLException(TSDBConstants.INVALID_VARIABLES); + // nothing to do } @Override public int getMaxRows() throws SQLException { + if (isClosed()) + throw new SQLException(STATEMENT_CLOSED); return 0; } @Override public void setMaxRows(int max) throws SQLException { - + if (isClosed()) + throw new SQLException(STATEMENT_CLOSED); + if (max < 0) + throw new SQLException(TSDBConstants.INVALID_VARIABLES); + // nothing to do } @Override public void setEscapeProcessing(boolean enable) throws SQLException { - + if (isClosed()) + throw new SQLException(RestfulStatement.STATEMENT_CLOSED); } @Override public int getQueryTimeout() throws SQLException { + if (isClosed()) + throw new SQLException(STATEMENT_CLOSED); return 0; } @Override public void setQueryTimeout(int seconds) throws SQLException { - + if (isClosed()) + throw new SQLException(STATEMENT_CLOSED); + if (seconds < 0) + throw new SQLException(TSDBConstants.INVALID_VARIABLES); } @Override public void cancel() throws SQLException { - + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public SQLWarning getWarnings() throws SQLException { - //TODO: getWarnings not Implemented + if (isClosed()) + throw new SQLException(STATEMENT_CLOSED); return null; } @Override public void clearWarnings() throws SQLException { - + // nothing to do + if (isClosed()) + throw new SQLException(STATEMENT_CLOSED); } @Override public void setCursorName(String name) throws SQLException { - + if (isClosed()) + throw new SQLException(RestfulStatement.STATEMENT_CLOSED); + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override @@ -159,133 +215,181 @@ public class RestfulStatement implements Statement { //如果执行了use操作应该将当前Statement的catalog设置为新的database if (SqlSyntaxValidator.isUseSql(sql)) { this.database = sql.trim().replace("use", "").trim(); + this.conn.setCatalog(this.database); } if (this.database == null) throw new SQLException("Database not specified or available"); - final String url = "http://" + 
conn.getHost() + ":" + conn.getPort() + "/rest/sql"; - // use database - HttpClientPoolUtil.execute(url, "use " + conn.getDatabase()); - // execute sql - String result = HttpClientPoolUtil.execute(url, sql); - // parse result - JSONObject jsonObject = JSON.parseObject(result); - if (jsonObject.getString("status").equals("error")) { - throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " + - jsonObject.getString("desc") + "\n" + - "error code: " + jsonObject.getString("code"))); + if (SqlSyntaxValidator.isSelectSql(sql)) { + executeQuery(sql); + } else if (SqlSyntaxValidator.isShowSql(sql) || SqlSyntaxValidator.isDescribeSql(sql)) { + final String url = "http://" + conn.getHost().trim() + ":" + conn.getPort() + "/rest/sql"; + if (!SqlSyntaxValidator.isShowDatabaseSql(sql)) { + HttpClientPoolUtil.execute(url, "use " + conn.getDatabase()); + } + String result = HttpClientPoolUtil.execute(url, sql); + JSONObject resultJson = JSON.parseObject(result); + if (resultJson.getString("status").equals("error")) { + throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " + resultJson.getString("desc") + "\n" + "error code: " + resultJson.getString("code"))); + } + this.resultSet = new RestfulResultSet(database, this, resultJson); + } else { + executeUpdate(sql); } + return true; } @Override public ResultSet getResultSet() throws SQLException { - return null; + if (isClosed()) + throw new SQLException(STATEMENT_CLOSED); + return resultSet; } @Override public int getUpdateCount() throws SQLException { - return 0; + if (isClosed()) { + throw new SQLException("Invalid method call on a closed statement."); + } + return this.affectedRows; } @Override public boolean getMoreResults() throws SQLException { - return false; + return getMoreResults(CLOSE_CURRENT_RESULT); } @Override public void setFetchDirection(int direction) throws SQLException { - + if (direction != ResultSet.FETCH_FORWARD && direction != ResultSet.FETCH_REVERSE && direction != ResultSet.FETCH_UNKNOWN) + throw new SQLException(TSDBConstants.INVALID_VARIABLES); + this.resultSet.setFetchDirection(direction); } @Override public int getFetchDirection() throws SQLException { - return 0; + return this.resultSet.getFetchDirection(); } @Override public void setFetchSize(int rows) throws SQLException { - + if (isClosed()) + throw new SQLException(STATEMENT_CLOSED); + if (rows < 0) + throw new SQLException(TSDBConstants.INVALID_VARIABLES); + //nothing to do } @Override public int getFetchSize() throws SQLException { + if (isClosed()) + throw new SQLException(STATEMENT_CLOSED); return 0; } @Override public int getResultSetConcurrency() throws SQLException { - return 0; + if (isClosed()) + throw new SQLException(STATEMENT_CLOSED); + return this.resultSet.getConcurrency(); } @Override public int getResultSetType() throws SQLException { - return 0; + if (isClosed()) + throw new SQLException(STATEMENT_CLOSED); + return this.resultSet.getType(); } @Override public void addBatch(String sql) throws SQLException { - + if (isClosed()) + throw new SQLException(STATEMENT_CLOSED); + //TODO: } @Override public void clearBatch() throws SQLException { - + //TODO: } @Override public int[] executeBatch() throws SQLException { + //TODO: return new int[0]; } @Override public Connection getConnection() throws SQLException { - return null; + if (isClosed()) + throw new SQLException(STATEMENT_CLOSED); + return this.conn; } @Override public boolean getMoreResults(int current) throws SQLException { + if (isClosed()) + throw new 
SQLException(STATEMENT_CLOSED); + if (resultSet == null) + return false; + +// switch (current) { +// case CLOSE_CURRENT_RESULT: +// resultSet.close(); +// break; +// case KEEP_CURRENT_RESULT: +// break; +// case CLOSE_ALL_RESULTS: +// resultSet.close(); +// break; +// default: +// throw new SQLException(TSDBConstants.INVALID_VARIABLES); +// } +// return next; return false; } @Override public ResultSet getGeneratedKeys() throws SQLException { - return null; + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { - return 0; + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { - return 0; + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public int executeUpdate(String sql, String[] columnNames) throws SQLException { - return 0; + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { - return false; + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public boolean execute(String sql, int[] columnIndexes) throws SQLException { - return false; + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public boolean execute(String sql, String[] columnNames) throws SQLException { - return false; + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } @Override public int getResultSetHoldability() throws SQLException { - return 0; + if (isClosed()) + throw new SQLException(STATEMENT_CLOSED); + return this.resultSet.getHoldability(); } @Override @@ -295,22 +399,30 @@ public class RestfulStatement implements Statement { @Override public void setPoolable(boolean poolable) throws SQLException { - + if (isClosed()) + throw new SQLException(STATEMENT_CLOSED); + //nothing to do } @Override public boolean isPoolable() throws SQLException { + if (isClosed()) + throw new SQLException(STATEMENT_CLOSED); return false; } @Override public void closeOnCompletion() throws SQLException { - + if (isClosed()) + throw new SQLException(STATEMENT_CLOSED); + this.closeOnCompletion = true; } @Override public boolean isCloseOnCompletion() throws SQLException { - return false; + if (isClosed()) + throw new SQLException(STATEMENT_CLOSED); + return this.closeOnCompletion; } @Override diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/util/HttpClientPoolUtil.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/util/HttpClientPoolUtil.java index 65399b122d97254b88b6bc2ef08910d7badc5061..23e8796980ac419635ef6953deff1033d158962e 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/util/HttpClientPoolUtil.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/util/HttpClientPoolUtil.java @@ -17,6 +17,8 @@ import org.apache.http.protocol.HTTP; import org.apache.http.protocol.HttpContext; import org.apache.http.util.EntityUtils; +import java.nio.charset.Charset; + public class HttpClientPoolUtil { public static PoolingHttpClientConnectionManager cm = null; @@ -94,7 +96,9 @@ public class HttpClientPoolUtil { initPools(); } method = (HttpEntityEnclosingRequestBase) getRequest(uri, HttpPost.METHOD_NAME, 
DEFAULT_CONTENT_TYPE, 0); - method.setEntity(new StringEntity(data)); + method.setHeader("Authorization", "Basic cm9vdDp0YW9zZGF0YQ=="); + method.setHeader("Content-Type", "text/plain"); + method.setEntity(new StringEntity(data, Charset.forName("UTF-8"))); HttpContext context = HttpClientContext.create(); CloseableHttpResponse httpResponse = httpClient.execute(method, context); httpEntity = httpResponse.getEntity(); @@ -105,26 +109,13 @@ public class HttpClientPoolUtil { if (method != null) { method.abort(); } -// e.printStackTrace(); -// logger.error("execute post request exception, url:" + uri + ", exception:" + e.toString() -// + ", cost time(ms):" + (System.currentTimeMillis() - startTime)); - new Exception("execute post request exception, url:" - + uri + ", exception:" + e.toString() + - ", cost time(ms):" + (System.currentTimeMillis() - startTime)) - .printStackTrace(); + new Exception("execute post request exception, url:" + uri + ", exception:" + e.toString() + ", cost time(ms):" + (System.currentTimeMillis() - startTime)).printStackTrace(); } finally { if (httpEntity != null) { try { EntityUtils.consumeQuietly(httpEntity); } catch (Exception e) { -// e.printStackTrace(); -// logger.error("close response exception, url:" + uri + ", exception:" + e.toString() -// + ", cost time(ms):" + (System.currentTimeMillis() - startTime)); - new Exception( - "close response exception, url:" + uri + - ", exception:" + e.toString() - + ", cost time(ms):" + (System.currentTimeMillis() - startTime)) - .printStackTrace(); + new Exception("close response exception, url:" + uri + ", exception:" + e.toString() + ", cost time(ms):" + (System.currentTimeMillis() - startTime)).printStackTrace(); } } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/SqlSyntaxValidator.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/SqlSyntaxValidator.java index 388c3978bef3372e6abca89a70bd428a8bb79d3d..f0d92346167411ca6da78015392ba8a21286e1cf 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/SqlSyntaxValidator.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/SqlSyntaxValidator.java @@ -15,14 +15,12 @@ package com.taosdata.jdbc.utils; import com.taosdata.jdbc.TSDBConnection; -import com.taosdata.jdbc.TSDBJNIConnector; import java.sql.Connection; -import java.sql.SQLException; public class SqlSyntaxValidator { - private static final String[] updateSQL = {"insert", "update", "delete", "create", "alter", "drop", "show", "describe", "use"}; + private static final String[] updateSQL = {"insert", "update", "delete", "create", "alter", "drop", "show", "describe", "use", "import"}; private static final String[] querySQL = {"select"}; private TSDBConnection tsdbConnection; @@ -31,22 +29,6 @@ public class SqlSyntaxValidator { this.tsdbConnection = (TSDBConnection) connection; } - public boolean validateSqlSyntax(String sql) throws SQLException { - - boolean res = false; - if (tsdbConnection == null || tsdbConnection.isClosed()) { - throw new SQLException("invalid connection"); - } else { - TSDBJNIConnector jniConnector = tsdbConnection.getConnection(); - if (jniConnector == null) { - throw new SQLException("jniConnector is null"); - } else { - res = jniConnector.validateCreateTableSql(sql); - } - } - return res; - } - public static boolean isValidForExecuteUpdate(String sql) { for (String prefix : updateSQL) { if (sql.trim().toLowerCase().startsWith(prefix)) @@ -56,18 +38,28 @@ public class SqlSyntaxValidator { } public static boolean isUseSql(String sql) { 
- return sql.trim().toLowerCase().startsWith(updateSQL[8]) || sql.trim().toLowerCase().matches("create\\s*database.*") || sql.toLowerCase().toLowerCase().matches("drop\\s*database.*"); + return sql.trim().toLowerCase().startsWith("use") || sql.trim().toLowerCase().matches("create\\s*database.*") || sql.toLowerCase().toLowerCase().matches("drop\\s*database.*"); + } + + public static boolean isShowSql(String sql) { + return sql.trim().toLowerCase().startsWith("show"); } - public static boolean isUpdateSql(String sql) { - return sql.trim().toLowerCase().startsWith(updateSQL[1]); + public static boolean isDescribeSql(String sql) { + return sql.trim().toLowerCase().startsWith("describe"); } + public static boolean isInsertSql(String sql) { - return sql.trim().toLowerCase().startsWith(updateSQL[0]); + return sql.trim().toLowerCase().startsWith("insert") || sql.trim().toLowerCase().startsWith("import"); } public static boolean isSelectSql(String sql) { - return sql.trim().toLowerCase().startsWith(querySQL[0]); + return sql.trim().toLowerCase().startsWith("select"); + } + + + public static boolean isShowDatabaseSql(String sql) { + return sql.trim().toLowerCase().matches("show\\s*databases"); } } diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/DatabaseMetaDataTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/DatabaseMetaDataTest.java index 284af3dfe75f7c436ec2ce875714afc235c525d8..19dabe07462b6481b2ab5460aad7ac5712e21b7c 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/DatabaseMetaDataTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/DatabaseMetaDataTest.java @@ -7,7 +7,7 @@ import org.junit.Test; import java.sql.*; import java.util.Properties; -public class DatabaseMetaDataTest extends BaseTest { +public class DatabaseMetaDataTest { static Connection connection = null; static PreparedStatement statement = null; static String dbName = "test"; @@ -23,20 +23,21 @@ public class DatabaseMetaDataTest extends BaseTest { } Properties properties = new Properties(); properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host); + properties.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata"); properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties); String sql = "drop database if exists " + dbName; - statement = (TSDBPreparedStatement) connection.prepareStatement(sql); + statement = connection.prepareStatement(sql); statement.executeUpdate("create database if not exists " + dbName); statement.executeUpdate("create table if not exists " + dbName + "." 
+ tName + " (ts timestamp, k int, v int)"); } @Test public void testMetaDataTest() throws SQLException { - DatabaseMetaData databaseMetaData = connection.getMetaData(); ResultSet resultSet = databaseMetaData.getTables(dbName, "t*", "t*", new String[]{"t"}); while (resultSet.next()) { @@ -180,7 +181,7 @@ public class DatabaseMetaDataTest extends BaseTest { databaseMetaData.getCatalogs(); // databaseMetaData.getTableTypes(); - databaseMetaData.getColumns("", "", "", ""); + databaseMetaData.getColumns(dbName, "", tName, ""); databaseMetaData.getColumnPrivileges("", "", "", ""); databaseMetaData.getTablePrivileges("", "", ""); databaseMetaData.getBestRowIdentifier("", "", "", 0, false); diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/AppMemoryLeakTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/AppMemoryLeakTest.java index 8de2e3b442b9b2ea1b84687878ba3bb4faf5e4d6..19bc5f713f9b406a943fc640fd03bb0503ed2967 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/AppMemoryLeakTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/AppMemoryLeakTest.java @@ -19,7 +19,7 @@ public class AppMemoryLeakTest { } } - @Test + @Test(expected = Exception.class) public void testCreateTooManyStatement() throws ClassNotFoundException, SQLException { Class.forName("com.taosdata.jdbc.TSDBDriver"); int stmtCnt = 0; @@ -30,15 +30,4 @@ public class AppMemoryLeakTest { } } - public static void main(String[] args) throws ClassNotFoundException, SQLException { - Class.forName("com.taosdata.jdbc.TSDBDriver"); - int stmtCnt = 0; - Connection conn = DriverManager.getConnection("jdbc:TAOS://localhost:6030/?user=root&password=taosdata"); - while (true) { - Statement stmt = conn.createStatement(); - System.out.println(++stmtCnt + " : " + stmt); - } - } - - } diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ConnectWrongDatabaseTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ConnectWrongDatabaseTest.java new file mode 100644 index 0000000000000000000000000000000000000000..e4d2d7598d6a6bd5610abcb1bd576f523cb22740 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ConnectWrongDatabaseTest.java @@ -0,0 +1,24 @@ +package com.taosdata.jdbc.cases; + +import org.junit.Assert; +import org.junit.Test; + +import java.sql.DriverManager; +import java.sql.SQLException; + +public class ConnectWrongDatabaseTest { + + @Test + public void connect() { + try { + Class.forName("com.taosdata.jdbc.TSDBDriver"); + DriverManager.getConnection("jdbc:TAOS://localhost:6030/wrong_db?user=root&password=taosdata"); + } catch (ClassNotFoundException e) { + e.printStackTrace(); + } catch (SQLException e) { + System.out.println(e.getMessage()); + Assert.assertEquals("TDengine Error: Invalid database name", e.getMessage()); + } + } + +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulJDBCTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulJDBCTest.java index d13475b96dfc96780faaae7b01275f717f8befbf..0af6b91532b63773510e10806d8f1e744ffe966e 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulJDBCTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulJDBCTest.java @@ -1,5 +1,6 @@ package com.taosdata.jdbc.rs; + import org.junit.*; import org.junit.runners.MethodSorters; diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/SQLTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/SQLTest.java new file mode 
100644 index 0000000000000000000000000000000000000000..8ff308f8546b39519b9b24412e772a60a3ef68f0 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/SQLTest.java @@ -0,0 +1,399 @@ +package com.taosdata.jdbc.rs; + +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.FixMethodOrder; +import org.junit.Test; +import org.junit.runners.MethodSorters; + +import java.sql.*; + +@FixMethodOrder(MethodSorters.NAME_ASCENDING) +public class SQLTest { + private static final String host = "master"; + + private static Connection connection; + + @Test + public void testCase001() { + String sql = "create database if not exists restful_test"; + execute(sql); + } + + @Test + public void testCase002() { + String sql = "use restful_test"; + execute(sql); + } + + @Test + public void testCase003() { + String sql = "show databases"; + executeWithResult(sql); + } + + @Test + public void testCase004() { + String sql = "show tables"; + executeWithResult(sql); + } + + @Test + public void testCase005() { + String sql = "show stables"; + executeWithResult(sql); + } + + @Test + public void testCase006() { + String sql = "show dnodes"; + executeWithResult(sql); + } + + @Test + public void testCase007() { + String sql = "show vgroups"; + executeWithResult(sql); + } + + @Test + public void testCase008() { + String sql = "drop table if exists restful_test.weather"; + execute(sql); + } + + @Test + public void testCase009() { + String sql = "create table if not exists restful_test.weather(ts timestamp, temperature float) tags(location nchar(64))"; + execute(sql); + } + + @Test + public void testCase010() { + String sql = "create table t1 using restful_test.weather tags('北京')"; + execute(sql); + } + + @Test + public void testCase011() { + String sql = "insert into restful_test.t1 values(now, 22.22)"; + executeUpdate(sql); + } + + @Test + public void testCase012() { + String sql = "insert into restful_test.t1 values('2020-01-01 00:00:00.000', 22.22)"; + executeUpdate(sql); + } + + @Test + public void testCase013() { + String sql = "insert into restful_test.t1 values('2020-01-01 00:01:00.000', 22.22),('2020-01-01 00:02:00.000', 22.22)"; + executeUpdate(sql); + } + + @Test + public void testCase014() { + String sql = "insert into restful_test.t2 using weather tags('上海') values('2020-01-01 00:03:00.000', 22.22)"; + executeUpdate(sql); + } + + @Test + public void testCase015() { + String sql = "insert into restful_test.t2 using weather tags('上海') values('2020-01-01 00:01:00.000', 22.22),('2020-01-01 00:02:00.000', 22.22)"; + executeUpdate(sql); + } + + @Test + public void testCase016() { + String sql = "insert into t1 values('2020-01-01 01:0:00.000', 22.22),('2020-01-01 02:00:00.000', 22.22) t2 values('2020-01-01 01:0:00.000', 33.33),('2020-01-01 02:00:00.000', 33.33)"; + executeUpdate(sql); + } + + @Test + public void testCase017() { + String sql = "Insert into t3 using weather tags('广东') values('2020-01-01 01:0:00.000', 22.22),('2020-01-01 02:00:00.000', 22.22) t4 using weather tags('天津') values('2020-01-01 01:0:00.000', 33.33),('2020-01-01 02:00:00.000', 33.33)"; + executeUpdate(sql); + } + + @Test + public void testCase018() { + String sql = "select * from restful_test.t1"; + executeQuery(sql); + } + + @Test + public void testCase019() { + String sql = "select * from restful_test.weather"; + executeQuery(sql); + } + + @Test + public void testCase020() { + String sql = "select ts, temperature from restful_test.t1"; + executeQuery(sql); + } + + @Test + public void testCase021() { + 
String sql = "select ts, temperature from restful_test.weather"; + executeQuery(sql); + } + + @Test + public void testCase022() { + String sql = "select temperature, ts from restful_test.t1"; + executeQuery(sql); + } + + @Test + public void testCase023() { + String sql = "select temperature, ts from restful_test.weather"; + executeQuery(sql); + } + + @Test + public void testCase024() { + String sql = "import into restful_test.t5 using weather tags('石家庄') values('2020-01-01 00:01:00.000', 22.22)"; + executeUpdate(sql); + } + + @Test + public void testCase025() { + String sql = "import into restful_test.t6 using weather tags('沈阳') values('2020-01-01 00:01:00.000', 22.22),('2020-01-01 00:02:00.000', 22.22)"; + executeUpdate(sql); + } + + @Test + public void testCase026() { + String sql = "import into restful_test.t7 using weather tags('长沙') values('2020-01-01 00:01:00.000', 22.22) restful_test.t8 using weather tags('吉林') values('2020-01-01 00:01:00.000', 22.22)"; + executeUpdate(sql); + } + + @Test + public void testCase027() { + String sql = "import into restful_test.t9 using weather tags('武汉') values('2020-01-01 00:01:00.000', 22.22) ,('2020-01-02 00:01:00.000', 22.22) restful_test.t10 using weather tags('哈尔滨') values('2020-01-01 00:01:00.000', 22.22),('2020-01-02 00:01:00.000', 22.22)"; + executeUpdate(sql); + } + + @Test + public void testCase028() { + String sql = "select location, temperature, ts from restful_test.weather where temperature > 1"; + executeQuery(sql); + } + + @Test + public void testCase029() { + String sql = "select location, temperature, ts from restful_test.weather where temperature < 1"; + executeQuery(sql); + } + + @Test + public void testCase030() { + String sql = "select location, temperature, ts from restful_test.weather where ts > now"; + executeQuery(sql); + } + + @Test + public void testCase031() { + String sql = "select location, temperature, ts from restful_test.weather where ts < now"; + executeQuery(sql); + } + + @Test + public void testCase032() { + String sql = "select count(*) from restful_test.weather"; + executeQuery(sql); + } + + @Test + public void testCase033() { + String sql = "select first(*) from restful_test.weather"; + executeQuery(sql); + } + + @Test + public void testCase034() { + String sql = "select last(*) from restful_test.weather"; + executeQuery(sql); + } + + @Test + public void testCase035() { + String sql = "select last_row(*) from restful_test.weather"; + executeQuery(sql); + } + + @Test + public void testCase036() { + String sql = "select ts, ts as primary_key from restful_test.weather"; + executeQuery(sql); + } + + + @Test + public void testCase037() { + String sql = "select database()"; + execute("use restful_test"); + executeQuery(sql); + } + + @Test + public void testCase038() { + String sql = "select client_version()"; + executeQuery(sql); + } + + @Test + public void testCase039() { + String sql = "select server_status()"; + executeQuery(sql); + } + + @Test + public void testCase040() { + String sql = "select server_status() as status"; + executeQuery(sql); + } + + @Test + public void testCase041() { + String sql = "select tbname, location from restful_test.weather"; + executeQuery(sql); + } + + @Test + public void testCase042() { + String sql = "select count(tbname) from restful_test.weather"; + executeQuery(sql); + } + + @Test + public void testCase043() { + String sql = "select * from restful_test.weather where ts < now - 1h"; + executeQuery(sql); + } + + @Test + public void testCase044() { + String sql = "select * from 
restful_test.weather where ts < now - 1h and location like '%'"; + executeQuery(sql); + } + + @Test + public void testCase045() { + String sql = "select * from restful_test.weather where ts < now - 1h order by ts"; + executeQuery(sql); + } + + @Test + public void testCase046() { + String sql = "select last(*) from restful_test.weather where ts < now - 1h group by tbname order by tbname"; + executeQuery(sql); + } + + @Test + public void testCase047() { + String sql = "select * from restful_test.weather limit 2"; + executeQuery(sql); + } + + @Test + public void testCase048() { + String sql = "select * from restful_test.weather limit 2 offset 5"; + executeQuery(sql); + } + + @Test + public void testCase049() { + String sql = "select * from restful_test.t1, restful_test.t3 where t1.ts = t3.ts "; + executeQuery(sql); + } + + @Test + public void testCase050() { + String sql = "select * from restful_test.t1, restful_test.t3 where t1.ts = t3.ts and t1.location = t3.location"; + executeQuery(sql); + } + + @Test + public void testCase051() { + String sql = "select * from restful_test.t1 tt, restful_test.t3 yy where tt.ts = yy.ts"; + executeQuery(sql); + } + + private void executeUpdate(String sql) { + try (Statement statement = connection.createStatement()) { + long start = System.currentTimeMillis(); + int affectedRows = statement.executeUpdate(sql); + long end = System.currentTimeMillis(); + System.out.println("[ affected rows : " + affectedRows + " ] time cost: " + (end - start) + " ms, execute statement ====> " + sql); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + private void executeWithResult(String sql) { + try (Statement statement = connection.createStatement()) { + statement.execute(sql); + ResultSet resultSet = statement.getResultSet(); + printResult(resultSet); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + private void execute(String sql) { + try (Statement statement = connection.createStatement()) { + long start = System.currentTimeMillis(); + boolean execute = statement.execute(sql); + long end = System.currentTimeMillis(); + printSql(sql, execute, (end - start)); + } catch (SQLException e) { + System.out.println("ERROR execute SQL ===> " + sql); + e.printStackTrace(); + } + } + + private static void printSql(String sql, boolean succeed, long cost) { + System.out.println("[ " + (succeed ? 
"OK" : "ERROR!") + " ] time cost: " + cost + " ms, execute statement ====> " + sql); + } + + private void executeQuery(String sql) { + try (Statement statement = connection.createStatement()) { + long start = System.currentTimeMillis(); + ResultSet resultSet = statement.executeQuery(sql); + long end = System.currentTimeMillis(); + printSql(sql, true, (end - start)); + printResult(resultSet); + } catch (SQLException e) { + System.out.println("ERROR execute SQL ===> " + sql); + e.printStackTrace(); + } + } + + private static void printResult(ResultSet resultSet) throws SQLException { + ResultSetMetaData metaData = resultSet.getMetaData(); + while (resultSet.next()) { + StringBuilder sb = new StringBuilder(); + for (int i = 1; i <= metaData.getColumnCount(); i++) { + String columnLabel = metaData.getColumnLabel(i); + String value = resultSet.getString(i); + sb.append(columnLabel + ": " + value + "\t"); + } + System.out.println(sb.toString()); + } + } + + @BeforeClass + public static void before() throws ClassNotFoundException, SQLException { + Class.forName("com.taosdata.jdbc.rs.RestfulDriver"); + connection = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/restful_test?user=root&password=taosdata"); + } + + @AfterClass + public static void after() throws SQLException { + connection.close(); + } + +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/SqlSyntaxValidatorTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/SqlSyntaxValidatorTest.java index fb570e16c4e0ef8036ccd2c29cdc51e1938b2139..ce84f967d09e2217bac90cc7a3f59bf4f50b2a15 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/SqlSyntaxValidatorTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/SqlSyntaxValidatorTest.java @@ -5,10 +5,6 @@ import org.junit.Test; public class SqlSyntaxValidatorTest { - @Test - public void validateSqlSyntax() { - } - @Test public void isSelectSQL() { Assert.assertTrue(SqlSyntaxValidator.isSelectSql("select * from test.weather")); diff --git a/src/connector/nodejs/nodetaos/cinterface.js b/src/connector/nodejs/nodetaos/cinterface.js index 995babdb2b7827267d6a4b68cd9d380671823543..a246256f15f313b61c0b7e990e63848fee2431bf 100644 --- a/src/connector/nodejs/nodetaos/cinterface.js +++ b/src/connector/nodejs/nodetaos/cinterface.js @@ -4,6 +4,7 @@ */ const ref = require('ref'); +const os = require('os'); const ffi = require('ffi'); const ArrayType = require('ref-array'); const Struct = require('ref-struct'); @@ -188,7 +189,13 @@ function CTaosInterface (config = null, pass = false) { ref.types.void_ptr2 = ref.refType(ref.types.void_ptr); /*Declare a bunch of functions first*/ /* Note, pointers to TAOS_RES, TAOS, are ref.types.void_ptr. 
The connection._conn buffer is supplied for pointers to TAOS * */ - this.libtaos = ffi.Library('libtaos', { + + if ('win32' == os.platform()) { + taoslibname = 'taos'; + } else { + taoslibname = 'libtaos'; + } + this.libtaos = ffi.Library(taoslibname, { 'taos_options': [ ref.types.int, [ ref.types.int , ref.types.void_ptr ] ], 'taos_init': [ ref.types.void, [ ] ], //TAOS *taos_connect(char *ip, char *user, char *pass, char *db, int port) diff --git a/src/connector/nodejs/package.json b/src/connector/nodejs/package.json index 2d5cf45e1db9a65ce86d998afea6cd689bf63317..5587a69e01e6743a7eb7ef234d111c86db246841 100644 --- a/src/connector/nodejs/package.json +++ b/src/connector/nodejs/package.json @@ -1,6 +1,6 @@ { "name": "td2.0-connector", - "version": "2.0.4", + "version": "2.0.5", "description": "A Node.js connector for TDengine.", "main": "tdengine.js", "scripts": { diff --git a/src/dnode/inc/dnodeModule.h b/src/dnode/inc/dnodeModule.h index e645784c8fd7aced259185f45402f9839842b9aa..863ea433c46c175ee7cb6ad46e6dd38f164d36b1 100644 --- a/src/dnode/inc/dnodeModule.h +++ b/src/dnode/inc/dnodeModule.h @@ -23,8 +23,7 @@ extern "C" { int32_t dnodeInitModules(); void dnodeCleanupModules(); -bool dnodeStartMnode(SMInfos *pMinfos); -void dnodeProcessModuleStatus(uint32_t moduleStatus); +int32_t dnodeStartMnode(SMInfos *pMinfos); #ifdef __cplusplus } diff --git a/src/dnode/src/dnodeMWrite.c b/src/dnode/src/dnodeMWrite.c index ea1cf028c5ea96f6f1265b88c47cbdb9caa10584..8c9e22ef4b19133863f32140fb6ddae292caf5a9 100644 --- a/src/dnode/src/dnodeMWrite.c +++ b/src/dnode/src/dnodeMWrite.c @@ -184,7 +184,19 @@ void dnodeReprocessMWriteMsg(void *pMsg) { dDebug("msg:%p, app:%p type:%s is redirected for mnode not running, retry times:%d", pWrite, pWrite->rpcMsg.ahandle, taosMsg[pWrite->rpcMsg.msgType], pWrite->retry); - dnodeSendRedirectMsg(pMsg, true); + if (pWrite->pBatchMasterMsg) { + ++pWrite->pBatchMasterMsg->received; + if (pWrite->pBatchMasterMsg->successed + pWrite->pBatchMasterMsg->received + >= pWrite->pBatchMasterMsg->expected) { + dnodeSendRedirectMsg(&pWrite->pBatchMasterMsg->rpcMsg, true); + dnodeFreeMWriteMsg(pWrite->pBatchMasterMsg); + } + + mnodeDestroySubMsg(pWrite); + + return; + } + dnodeSendRedirectMsg(&pWrite->rpcMsg, true); dnodeFreeMWriteMsg(pWrite); } else { dDebug("msg:%p, app:%p type:%s is reput into mwrite queue:%p, retry times:%d", pWrite, pWrite->rpcMsg.ahandle, diff --git a/src/dnode/src/dnodeModule.c b/src/dnode/src/dnodeModule.c index 62de85445c2e6756094dea1ac6e7e6a5880f7e18..a661585b3b39df986ac7866a255472e47e789fe6 100644 --- a/src/dnode/src/dnodeModule.c +++ b/src/dnode/src/dnodeModule.c @@ -127,14 +127,16 @@ int32_t dnodeInitModules() { return dnodeStartModules(); } -void dnodeProcessModuleStatus(uint32_t moduleStatus) { +int32_t dnodeProcessModuleStatus(uint32_t moduleStatus) { + int32_t code = 0; + for (int32_t module = TSDB_MOD_MNODE; module < TSDB_MOD_HTTP; ++module) { bool enableModule = moduleStatus & (1 << module); if (!tsModule[module].enable && enableModule) { dInfo("module status:%u is set, start %s module", moduleStatus, tsModule[module].name); tsModule[module].enable = true; dnodeSetModuleStatus(module); - (*tsModule[module].startFp)(); + code = (*tsModule[module].startFp)(); } if (tsModule[module].enable && !enableModule) { @@ -144,21 +146,29 @@ void dnodeProcessModuleStatus(uint32_t moduleStatus) { (*tsModule[module].stopFp)(); } } -} -bool dnodeStartMnode(SMInfos *pMinfos) { - SMInfos *pMnodes = pMinfos; + return code; +} +int32_t dnodeStartMnode(SMInfos 
*pMinfos) { if (tsModuleStatus & (1 << TSDB_MOD_MNODE)) { dDebug("mnode module is already started, module status:%d", tsModuleStatus); - return false; + return 0; } uint32_t moduleStatus = tsModuleStatus | (1 << TSDB_MOD_MNODE); dInfo("start mnode module, module status:%d, new status:%d", tsModuleStatus, moduleStatus); - dnodeProcessModuleStatus(moduleStatus); - sdbUpdateSync(pMnodes); + int32_t code = dnodeProcessModuleStatus(moduleStatus); + if (code == 0) { + code = sdbUpdateSync(pMinfos); + } + + if (code != 0) { + dError("failed to start mnode module since %s", tstrerror(code)); + moduleStatus = tsModuleStatus & ~(1 << TSDB_MOD_MNODE); + dnodeProcessModuleStatus(moduleStatus); + } - return true; + return code; } diff --git a/src/dnode/src/dnodePeer.c b/src/dnode/src/dnodePeer.c index de0c360c88e8197aea47beee656c7ddb77a26d8e..5ee10abc30311792bcd68e579567eea74fa93821 100644 --- a/src/dnode/src/dnodePeer.c +++ b/src/dnode/src/dnodePeer.c @@ -60,7 +60,7 @@ int32_t dnodeInitServer() { rpcInit.label = "DND-S"; rpcInit.numOfThreads = 1; rpcInit.cfp = dnodeProcessReqMsgFromDnode; - rpcInit.sessions = TSDB_MAX_VNODES; + rpcInit.sessions = TSDB_MAX_VNODES << 4; rpcInit.connType = TAOS_CONN_SERVER; rpcInit.idleTime = tsShellActivityTimer * 1000; @@ -123,7 +123,7 @@ int32_t dnodeInitClient() { rpcInit.label = "DND-C"; rpcInit.numOfThreads = 1; rpcInit.cfp = dnodeProcessRspFromDnode; - rpcInit.sessions = TSDB_MAX_VNODES; + rpcInit.sessions = TSDB_MAX_VNODES << 4; rpcInit.connType = TAOS_CONN_CLIENT; rpcInit.idleTime = tsShellActivityTimer * 1000; rpcInit.user = "t"; diff --git a/src/dnode/src/dnodeVMgmt.c b/src/dnode/src/dnodeVMgmt.c index bc24d1bf623ec014dd4a4ad35442218549aaf335..4a3d6d9a84f7918c8cbbc40cd80b074ff164cf85 100644 --- a/src/dnode/src/dnodeVMgmt.c +++ b/src/dnode/src/dnodeVMgmt.c @@ -214,7 +214,5 @@ static int32_t dnodeProcessCreateMnodeMsg(SRpcMsg *pMsg) { dDebug("mnode index:%d, mnode:%d:%s", i, pCfg->mnodes.mnodeInfos[i].mnodeId, pCfg->mnodes.mnodeInfos[i].mnodeEp); } - dnodeStartMnode(&pCfg->mnodes); - - return TSDB_CODE_SUCCESS; + return dnodeStartMnode(&pCfg->mnodes); } diff --git a/src/dnode/src/dnodeVnodes.c b/src/dnode/src/dnodeVnodes.c index 03b51feb9c9b0de20aec5c3b4e5f0fdae1b8f60a..c62d5a8207c8cd5f540e3ff861d44cd55c7c8220 100644 --- a/src/dnode/src/dnodeVnodes.c +++ b/src/dnode/src/dnodeVnodes.c @@ -73,7 +73,8 @@ static int32_t dnodeGetVnodeList(int32_t vnodeList[], int32_t *numOfVnodes) { if (*numOfVnodes >= TSDB_MAX_VNODES) { dError("vgId:%d, too many vnode directory in disk, exist:%d max:%d", vnode, *numOfVnodes, TSDB_MAX_VNODES); - continue; + closedir(dir); + return TSDB_CODE_DND_TOO_MANY_VNODES; } else { vnodeList[*numOfVnodes - 1] = vnode; } @@ -288,4 +289,4 @@ void dnodeSendStatusMsgToMnode() { dInfo("force send status msg to mnode"); taosTmrReset(dnodeSendStatusMsg, 3, NULL, tsDnodeTmr, &tsStatusTimer); } -} \ No newline at end of file +} diff --git a/src/inc/dnode.h b/src/inc/dnode.h index 877738778b022c9c7d38a3801beb5cdc86ff9f4d..5ecaf19f61a022bae849c2f946acb0ee693aeb59 100644 --- a/src/inc/dnode.h +++ b/src/inc/dnode.h @@ -40,7 +40,7 @@ void dnodeGetClusterId(char *clusterId); void dnodeUpdateEp(int32_t dnodeId, char *ep, char *fqdn, uint16_t *port); bool dnodeCheckEpChanged(int32_t dnodeId, char *epstr); -bool dnodeStartMnode(SMInfos *pMinfos); +int32_t dnodeStartMnode(SMInfos *pMinfos); void dnodeAddClientRspHandle(uint8_t msgType, void (*fp)(SRpcMsg *rpcMsg)); void dnodeSendMsgToDnode(SRpcEpSet *epSet, SRpcMsg *rpcMsg); diff --git a/src/inc/mnode.h 
b/src/inc/mnode.h index bdc30b0c46ced0961715bd48623fdb9e52fb440e..2495a42ba2e5d23cb361e2d64de04d1f710764ea 100644 --- a/src/inc/mnode.h +++ b/src/inc/mnode.h @@ -42,11 +42,12 @@ typedef struct SMnodeMsg { struct SVgObj * pVgroup; struct STableObj *pTable; struct SSTableObj*pSTable; + struct SMnodeMsg *pBatchMasterMsg; SMnodeRsp rpcRsp; - int8_t received; - int8_t successed; - int8_t expected; - int8_t retry; + int16_t received; + int16_t successed; + int16_t expected; + int16_t retry; int32_t incomingTs; int32_t code; void * pObj; @@ -57,13 +58,14 @@ typedef struct SMnodeMsg { void * mnodeCreateMsg(SRpcMsg *pRpcMsg); int32_t mnodeInitMsg(SMnodeMsg *pMsg); void mnodeCleanupMsg(SMnodeMsg *pMsg); +void mnodeDestroySubMsg(SMnodeMsg *pSubMsg); int32_t mnodeInitSystem(); int32_t mnodeStartSystem(); void mnodeCleanupSystem(); void mnodeStopSystem(); void sdbUpdateAsync(); -void sdbUpdateSync(void *pMnodes); +int32_t sdbUpdateSync(void *pMnodes); bool mnodeIsRunning(); int32_t mnodeProcessRead(SMnodeMsg *pMsg); int32_t mnodeProcessWrite(SMnodeMsg *pMsg); diff --git a/src/inc/query.h b/src/inc/query.h index 5e1de77889cc469566cc94b729c55622e5462bd6..7342221cb9de1b632ad0f398f2f3a8d27621747a 100644 --- a/src/inc/query.h +++ b/src/inc/query.h @@ -86,6 +86,7 @@ void qDestroyQueryInfo(qinfo_t qHandle); void* qOpenQueryMgmt(int32_t vgId); void qQueryMgmtNotifyClosed(void* pExecutor); +void qQueryMgmtReOpen(void *pExecutor); void qCleanupQueryMgmt(void* pExecutor); void** qRegisterQInfo(void* pMgmt, uint64_t qInfo); void** qAcquireQInfo(void* pMgmt, uint64_t key); diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h index 12cff90be2d776d4ae146512937e6f632e4d15a2..ed88bc15ee240ae7f87eeb2c5f0f665f21128e75 100644 --- a/src/inc/taoserror.h +++ b/src/inc/taoserror.h @@ -126,6 +126,11 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_SHOWOBJ, 0, 0x030B, "Data expir TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_QUERY_ID, 0, 0x030C, "Invalid query id") TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_STREAM_ID, 0, 0x030D, "Invalid stream id") TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_CONN_ID, 0, 0x030E, "Invalid connection id") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_MNODE_IS_RUNNING, 0, 0x0310, "mnode is already running") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_FAILED_TO_CONFIG_SYNC, 0, 0x0311, "failed to config sync") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_FAILED_TO_START_SYNC, 0, 0x0312, "failed to start sync") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_FAILED_TO_CREATE_DIR, 0, 0x0313, "failed to create mnode dir") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_FAILED_TO_INIT_STEP, 0, 0x0314, "failed to init components") TAOS_DEFINE_ERROR(TSDB_CODE_MND_SDB_OBJ_ALREADY_THERE, 0, 0x0320, "Object already there") TAOS_DEFINE_ERROR(TSDB_CODE_MND_SDB_ERROR, 0, 0x0321, "Unexpected generic error in sdb") @@ -194,6 +199,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_DND_OUT_OF_MEMORY, 0, 0x0401, "Dnode out TAOS_DEFINE_ERROR(TSDB_CODE_DND_NO_WRITE_ACCESS, 0, 0x0402, "No permission for disk files in dnode") TAOS_DEFINE_ERROR(TSDB_CODE_DND_INVALID_MSG_LEN, 0, 0x0403, "Invalid message length") TAOS_DEFINE_ERROR(TSDB_CODE_DND_ACTION_IN_PROGRESS, 0, 0x0404, "Action in progress") +TAOS_DEFINE_ERROR(TSDB_CODE_DND_TOO_MANY_VNODES, 0, 0x0405, "Too many vnode directories") // vnode TAOS_DEFINE_ERROR(TSDB_CODE_VND_ACTION_IN_PROGRESS, 0, 0x0500, "Action in progress") diff --git a/src/kit/taosdemox/query.json b/src/kit/taosdemox/query.json index 53d0b319212196257aa3e84be1221bd6e2bd0d8d..b7b08edfc912bdccc12bc6b6672d62a8ee4ad417 100644 --- a/src/kit/taosdemox/query.json +++ b/src/kit/taosdemox/query.json @@ -6,12 
+6,12 @@ "user": "root", "password": "taosdata", "databases": "db01", - "super_table_query": - {"rate":1, "concurrent":1, + "specified_table_query": + {"query_interval":1, "concurrent":1, "sqls": [{"sql": "select count(*) from stb01", "result": "./query_res0.txt"}] }, - "sub_table_query": - {"stblname": "stb01", "rate":1, "threads":1, - "sqls": [{"sql": "select count(*) from xxxx", "result": "./query_res1.txt"}] + "super_table_query": + {"stblname": "stb01", "query_interval":1, "threads":1, + "sqls": [{"sql": "select count(*) from xxxx", "result": "./query_res1.txt"}] } } diff --git a/src/kit/taosdemox/taosdemox.c b/src/kit/taosdemox/taosdemox.c index 5c9fd025f0a39e03ea8eaedc1bc81bc7e88d8eb4..97e7b426675da6d9a5398da52fe6e34e86b3562d 100644 --- a/src/kit/taosdemox/taosdemox.c +++ b/src/kit/taosdemox/taosdemox.c @@ -1441,11 +1441,12 @@ static int getAllChildNameOfSuperTable(TAOS * taos, char* dbName, char* sTblName strncpy(pTblName, (char *)row[0], TSDB_TABLE_NAME_LEN); //printf("==== sub table name: %s\n", pTblName); count++; - if (count == childTblCount) { - char *tmp = realloc(childTblName, (size_t)count*1.5*TSDB_TABLE_NAME_LEN); + if (count >= childTblCount - 1) { + char *tmp = realloc(childTblName, (size_t)childTblCount*1.5*TSDB_TABLE_NAME_LEN+1); if (tmp != NULL) { childTblName = tmp; - memset(childTblName + count*TSDB_TABLE_NAME_LEN, 0, (size_t)(count*0.5*TSDB_TABLE_NAME_LEN)); + childTblCount = (int)(childTblCount*1.5); + memset(childTblName + count*TSDB_TABLE_NAME_LEN, 0, (size_t)((childTblCount-count)*TSDB_TABLE_NAME_LEN)); } else { // exit, if allocate more memory failed printf("realloc fail for save child table name of %s.%s\n", dbName, sTblName); @@ -3960,7 +3961,11 @@ void *superQueryProcess(void *sarg) { for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) { int64_t t1 = taosGetTimestampUs(); - selectAndGetResult(winfo->taos, g_queryInfo.superQueryInfo.sql[i], g_queryInfo.superQueryInfo.result[i]); + char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; + if (g_queryInfo.superQueryInfo.result[i][0] != 0) { + sprintf(tmpFile, "%s-%d", g_queryInfo.superQueryInfo.result[i], winfo->threadID); + } + selectAndGetResult(winfo->taos, g_queryInfo.superQueryInfo.sql[i], tmpFile); int64_t t2 = taosGetTimestampUs(); printf("taosc select sql return, Spent %f s\n", (t2 - t1)/1000000.0); } else { @@ -4019,7 +4024,11 @@ void *subQueryProcess(void *sarg) { for (int i = 0; i < g_queryInfo.subQueryInfo.sqlCount; i++) { memset(sqlstr,0,sizeof(sqlstr)); replaceSubTblName(g_queryInfo.subQueryInfo.sql[i], sqlstr, i); - selectAndGetResult(winfo->taos, sqlstr, g_queryInfo.subQueryInfo.result[i]); + char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; + if (g_queryInfo.subQueryInfo.result[i][0] != 0) { + sprintf(tmpFile, "%s-%d", g_queryInfo.subQueryInfo.result[i], winfo->threadID); + } + selectAndGetResult(winfo->taos, sqlstr, tmpFile); } } et = taosGetTimestampMs(); @@ -4193,7 +4202,11 @@ void *subSubscribeProcess(void *sarg) { sprintf(topic, "taosdemo-subscribe-%d", i); memset(subSqlstr,0,sizeof(subSqlstr)); replaceSubTblName(g_queryInfo.subQueryInfo.sql[i], subSqlstr, i); - g_queryInfo.subQueryInfo.tsub[i] = subscribeImpl(winfo->taos, subSqlstr, topic, g_queryInfo.subQueryInfo.result[i]); + char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; + if (g_queryInfo.subQueryInfo.result[i][0] != 0) { + sprintf(tmpFile, "%s-%d", g_queryInfo.subQueryInfo.result[i], winfo->threadID); + } + g_queryInfo.subQueryInfo.tsub[i] = subscribeImpl(winfo->taos, subSqlstr, topic, tmpFile); 
if (NULL == g_queryInfo.subQueryInfo.tsub[i]) { return NULL; } @@ -4211,7 +4224,11 @@ void *subSubscribeProcess(void *sarg) { TAOS_RES* res = taos_consume(g_queryInfo.subQueryInfo.tsub[i]); if (res) { - getResult(res, g_queryInfo.subQueryInfo.result[i]); + char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; + if (g_queryInfo.subQueryInfo.result[i][0] != 0) { + sprintf(tmpFile, "%s-%d", g_queryInfo.subQueryInfo.result[i], winfo->threadID); + } + getResult(res, tmpFile); taos_free_result(res); } } @@ -4244,7 +4261,11 @@ void *superSubscribeProcess(void *sarg) { char topic[32] = {0}; for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { sprintf(topic, "taosdemo-subscribe-%d", i); - g_queryInfo.superQueryInfo.tsub[i] = subscribeImpl(winfo->taos, g_queryInfo.superQueryInfo.sql[i], topic, g_queryInfo.superQueryInfo.result[i]); + char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; + if (g_queryInfo.subQueryInfo.result[i][0] != 0) { + sprintf(tmpFile, "%s-%d", g_queryInfo.superQueryInfo.result[i], winfo->threadID); + } + g_queryInfo.superQueryInfo.tsub[i] = subscribeImpl(winfo->taos, g_queryInfo.superQueryInfo.sql[i], topic, tmpFile); if (NULL == g_queryInfo.superQueryInfo.tsub[i]) { return NULL; } @@ -4262,7 +4283,11 @@ void *superSubscribeProcess(void *sarg) { TAOS_RES* res = taos_consume(g_queryInfo.superQueryInfo.tsub[i]); if (res) { - getResult(res, g_queryInfo.superQueryInfo.result[i]); + char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; + if (g_queryInfo.superQueryInfo.result[i][0] != 0) { + sprintf(tmpFile, "%s-%d", g_queryInfo.superQueryInfo.result[i], winfo->threadID); + } + getResult(res, tmpFile); taos_free_result(res); } } diff --git a/src/mnode/src/mnodeDb.c b/src/mnode/src/mnodeDb.c index 333844ccbea801f6ad0bd3c31bdf0ee7f9287c85..8a03b1cd0e1029ef650f13f21a4c3548557b0527 100644 --- a/src/mnode/src/mnodeDb.c +++ b/src/mnode/src/mnodeDb.c @@ -311,6 +311,11 @@ static int32_t mnodeCheckDbCfg(SDbCfg *pCfg) { return TSDB_CODE_MND_INVALID_DB_OPTION; } + if (pCfg->replications > mnodeGetDnodesNum()) { + mError("no enough dnode to config replica: %d, #dnodes: %d", pCfg->replications, mnodeGetDnodesNum()); + return TSDB_CODE_MND_INVALID_DB_OPTION; + } + if (pCfg->quorum < TSDB_MIN_DB_REPLICA_OPTION || pCfg->quorum > TSDB_MAX_DB_REPLICA_OPTION) { mError("invalid db option quorum:%d valid range: [%d, %d]", pCfg->quorum, TSDB_MIN_DB_REPLICA_OPTION, TSDB_MAX_DB_REPLICA_OPTION); diff --git a/src/mnode/src/mnodeMain.c b/src/mnode/src/mnodeMain.c index 6e001f4dfbb64c9d73581fef624b4b763b39d50c..7ef0488c420dd470c3afc5d7ca8ac7a518ccdc73 100644 --- a/src/mnode/src/mnodeMain.c +++ b/src/mnode/src/mnodeMain.c @@ -74,13 +74,13 @@ static int32_t mnodeInitComponents() { int32_t mnodeStartSystem() { if (tsMgmtIsRunning) { mInfo("mnode module already started..."); - return 0; + return TSDB_CODE_SUCCESS; } mInfo("starting to initialize mnode ..."); if (mkdir(tsMnodeDir, 0755) != 0 && errno != EEXIST) { mError("failed to init mnode dir:%s, reason:%s", tsMnodeDir, strerror(errno)); - return -1; + return TSDB_CODE_MND_FAILED_TO_CREATE_DIR; } dnodeAllocMWritequeue(); @@ -88,7 +88,7 @@ int32_t mnodeStartSystem() { dnodeAllocateMPeerQueue(); if (mnodeInitComponents() != 0) { - return -1; + return TSDB_CODE_MND_FAILED_TO_INIT_STEP; } dnodeReportStep("mnode-grant", "start to set grant infomation", 0); @@ -99,7 +99,7 @@ int32_t mnodeStartSystem() { sdbUpdateSync(NULL); - return 0; + return TSDB_CODE_SUCCESS; } int32_t mnodeInitSystem() { diff --git a/src/mnode/src/mnodeSdb.c b/src/mnode/src/mnodeSdb.c index 
6997d0a666555e1237b7f5e019368c822ed2a6b1..9d2bfe0ce15f2579bdffd95095ca235bdadff286 100644 --- a/src/mnode/src/mnodeSdb.c +++ b/src/mnode/src/mnodeSdb.c @@ -318,11 +318,11 @@ void sdbUpdateAsync() { taosTmrReset(sdbUpdateSyncTmrFp, 200, NULL, tsMnodeTmr, &tsSdbTmr); } -void sdbUpdateSync(void *pMnodes) { +int32_t sdbUpdateSync(void *pMnodes) { SMInfos *pMinfos = pMnodes; if (!mnodeIsRunning()) { mDebug("vgId:1, mnode not start yet, update sync config later"); - return; + return TSDB_CODE_MND_MNODE_IS_RUNNING; } mDebug("vgId:1, update sync config, pMnodes:%p", pMnodes); @@ -377,12 +377,12 @@ void sdbUpdateSync(void *pMnodes) { if (!hasThisDnode) { sdbDebug("vgId:1, update sync config, this dnode not exist"); - return; + return TSDB_CODE_MND_FAILED_TO_CONFIG_SYNC; } if (memcmp(&syncCfg, &tsSdbMgmt.cfg, sizeof(SSyncCfg)) == 0) { sdbDebug("vgId:1, update sync config, info not changed"); - return; + return TSDB_CODE_SUCCESS; } sdbInfo("vgId:1, work as mnode, replica:%d", syncCfg.replica); @@ -407,12 +407,15 @@ void sdbUpdateSync(void *pMnodes) { tsSdbMgmt.cfg = syncCfg; if (tsSdbMgmt.sync) { - syncReconfig(tsSdbMgmt.sync, &syncCfg); + int32_t code = syncReconfig(tsSdbMgmt.sync, &syncCfg); + if (code != 0) return code; } else { tsSdbMgmt.sync = syncStart(&syncInfo); + if (tsSdbMgmt.sync <= 0) return TSDB_CODE_MND_FAILED_TO_START_SYNC; } sdbUpdateMnodeRoles(); + return TSDB_CODE_SUCCESS; } int32_t sdbInitRef() { @@ -1051,7 +1054,10 @@ static int32_t sdbWriteFwdToQueue(int32_t vgId, void *wparam, int32_t qtype, voi memcpy(pRow->pHead, pHead, sizeof(SWalHead) + pHead->len); pRow->rowData = pRow->pHead->cont; - return sdbWriteToQueue(pRow, qtype); + int32_t code = sdbWriteToQueue(pRow, qtype); + if (code == TSDB_CODE_MND_ACTION_IN_PROGRESS) code = 0; + + return code; } static int32_t sdbWriteRowToQueue(SSdbRow *pInputRow, int32_t action) { diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c index 4cd11dce1c1a62ff52590b4c4648c289cd1fcb1b..f53a4209b06fe6fc04516535bd33e90b95af32bf 100644 --- a/src/mnode/src/mnodeTable.c +++ b/src/mnode/src/mnodeTable.c @@ -26,6 +26,7 @@ #include "tcompare.h" #include "tdataformat.h" #include "tgrant.h" +#include "tqueue.h" #include "hash.h" #include "mnode.h" #include "dnode.h" @@ -720,6 +721,133 @@ static void mnodeExtractTableName(char* tableId, char* name) { } } +static SMnodeMsg *mnodeCreateSubMsg(SMnodeMsg *pBatchMasterMsg, int32_t contSize) { + SMnodeMsg *pSubMsg = taosAllocateQitem(sizeof(*pBatchMasterMsg) + contSize); + *pSubMsg = *pBatchMasterMsg; + + //pSubMsg->pCont = (char *) pSubMsg + sizeof(SMnodeMsg); + pSubMsg->rpcMsg.pCont = pSubMsg->pCont; + pSubMsg->successed = 0; + pSubMsg->expected = 0; + SCMCreateTableMsg *pCM = pSubMsg->rpcMsg.pCont; + pCM->numOfTables = htonl(1); + pCM->contLen = htonl(contSize); + + return pSubMsg; +} + +void mnodeDestroySubMsg(SMnodeMsg *pSubMsg) { + if (pSubMsg) { + // pUser is retained in batch master msg + if (pSubMsg->pDb) mnodeDecDbRef(pSubMsg->pDb); + if (pSubMsg->pVgroup) mnodeDecVgroupRef(pSubMsg->pVgroup); + if (pSubMsg->pTable) mnodeDecTableRef(pSubMsg->pTable); + if (pSubMsg->pSTable) mnodeDecTableRef(pSubMsg->pSTable); + if (pSubMsg->pAcct) mnodeDecAcctRef(pSubMsg->pAcct); + if (pSubMsg->pDnode) mnodeDecDnodeRef(pSubMsg->pDnode); + + taosFreeQitem(pSubMsg); + } +} + +static int32_t mnodeValidateCreateTableMsg(SCreateTableMsg *pCreateTable, SMnodeMsg *pMsg) { + if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDb(pCreateTable->db); + if (pMsg->pDb == NULL) { + mError("msg:%p, app:%p table:%s, failed to 
create, db not selected", pMsg, pMsg->rpcMsg.ahandle, pCreateTable->tableId); + return TSDB_CODE_MND_DB_NOT_SELECTED; + } + + if (pMsg->pDb->status != TSDB_DB_STATUS_READY) { + mError("db:%s, status:%d, in dropping", pMsg->pDb->name, pMsg->pDb->status); + return TSDB_CODE_MND_DB_IN_DROPPING; + } + + if (pMsg->pTable == NULL) pMsg->pTable = mnodeGetTable(pCreateTable->tableId); + if (pMsg->pTable != NULL && pMsg->retry == 0) { + if (pCreateTable->getMeta) { + mDebug("msg:%p, app:%p table:%s, continue to get meta", pMsg, pMsg->rpcMsg.ahandle, pCreateTable->tableId); + return mnodeGetChildTableMeta(pMsg); + } else if (pCreateTable->igExists) { + mDebug("msg:%p, app:%p table:%s, is already exist", pMsg, pMsg->rpcMsg.ahandle, pCreateTable->tableId); + return TSDB_CODE_SUCCESS; + } else { + mError("msg:%p, app:%p table:%s, failed to create, table already exist", pMsg, pMsg->rpcMsg.ahandle, + pCreateTable->tableId); + return TSDB_CODE_MND_TABLE_ALREADY_EXIST; + } + } + + if (pCreateTable->numOfTags != 0) { + mDebug("msg:%p, app:%p table:%s, create stable msg is received from thandle:%p", pMsg, pMsg->rpcMsg.ahandle, + pCreateTable->tableId, pMsg->rpcMsg.handle); + return mnodeProcessCreateSuperTableMsg(pMsg); + } else { + mDebug("msg:%p, app:%p table:%s, create ctable msg is received from thandle:%p", pMsg, pMsg->rpcMsg.ahandle, + pCreateTable->tableId, pMsg->rpcMsg.handle); + return mnodeProcessCreateChildTableMsg(pMsg); + } +} + +static int32_t mnodeProcessBatchCreateTableMsg(SMnodeMsg *pMsg) { + if (pMsg->pBatchMasterMsg == NULL) { // batch master first round + pMsg->pBatchMasterMsg = pMsg; + + SCMCreateTableMsg *pCreate = pMsg->rpcMsg.pCont; + int32_t numOfTables = htonl(pCreate->numOfTables); + int32_t contentLen = htonl(pCreate->contLen); + pMsg->expected = numOfTables; + + int32_t code = TSDB_CODE_SUCCESS; + SCreateTableMsg *pCreateTable = (SCreateTableMsg*) ((char*) pCreate + sizeof(SCMCreateTableMsg)); + for (SCreateTableMsg *p = pCreateTable; p < (SCreateTableMsg *) ((char *) pCreate + contentLen); p = (SCreateTableMsg *) ((char *) p + htonl(p->len))) { + SMnodeMsg *pSubMsg = mnodeCreateSubMsg(pMsg, sizeof(SCMCreateTableMsg) + htonl(p->len)); + memcpy(pSubMsg->pCont + sizeof(SCMCreateTableMsg), p, htonl(p->len)); + code = mnodeValidateCreateTableMsg(p, pSubMsg); + + if (code == TSDB_CODE_SUCCESS || code == TSDB_CODE_MND_TABLE_ALREADY_EXIST) { + ++pSubMsg->pBatchMasterMsg->successed; + mnodeDestroySubMsg(pSubMsg); + continue; + } + + if (code != TSDB_CODE_MND_ACTION_IN_PROGRESS) { + mnodeDestroySubMsg(pSubMsg); + return code; + } + } + + if (pMsg->successed >= pMsg->expected) { + return code; + } else { + return TSDB_CODE_MND_ACTION_IN_PROGRESS; + } + } else { + if (pMsg->pBatchMasterMsg != pMsg) { // batch sub replay + SCMCreateTableMsg *pCreate = pMsg->rpcMsg.pCont; + SCreateTableMsg *pCreateTable = (SCreateTableMsg*) ((char*) pCreate + sizeof(SCMCreateTableMsg)); + int32_t code = mnodeValidateCreateTableMsg(pCreateTable, pMsg); + if (code == TSDB_CODE_SUCCESS || code == TSDB_CODE_MND_TABLE_ALREADY_EXIST) { + ++pMsg->pBatchMasterMsg->successed; + mnodeDestroySubMsg(pMsg); + } else if (code == TSDB_CODE_MND_ACTION_NEED_REPROCESSED) { + return code; + } else if (code != TSDB_CODE_MND_ACTION_IN_PROGRESS) { + ++pMsg->pBatchMasterMsg->received; + mnodeDestroySubMsg(pMsg); + } + + if (pMsg->pBatchMasterMsg->successed + pMsg->pBatchMasterMsg->received + >= pMsg->pBatchMasterMsg->expected) { + dnodeSendRpcMWriteRsp(pMsg->pBatchMasterMsg, TSDB_CODE_SUCCESS); + } + + return 
TSDB_CODE_MND_ACTION_IN_PROGRESS; + } else { // batch master replay, reprocess the whole batch + assert(0); + } + } +} + static int32_t mnodeProcessCreateTableMsg(SMnodeMsg *pMsg) { SCMCreateTableMsg *pCreate = pMsg->rpcMsg.pCont; @@ -729,6 +857,11 @@ static int32_t mnodeProcessCreateTableMsg(SMnodeMsg *pMsg) { // todo return error } + // batch master msg first round or reprocessing and batch sub msg reprocessing + if (numOfTables > 1 || pMsg->pBatchMasterMsg != NULL) { + return mnodeProcessBatchCreateTableMsg(pMsg); + } + SCreateTableMsg *p = (SCreateTableMsg*)((char*) pCreate + sizeof(SCMCreateTableMsg)); if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDb(p->db); if (pMsg->pDb == NULL) { @@ -1737,6 +1870,18 @@ static int32_t mnodeDoCreateChildTableCb(SMnodeMsg *pMsg, int32_t code) { mDebug("msg:%p, app:%p table:%s, created in dnode, thandle:%p", pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId, pMsg->rpcMsg.handle); + if (pMsg->pBatchMasterMsg) { + ++pMsg->pBatchMasterMsg->successed; + if (pMsg->pBatchMasterMsg->successed + pMsg->pBatchMasterMsg->received + >= pMsg->pBatchMasterMsg->expected) { + dnodeSendRpcMWriteRsp(pMsg->pBatchMasterMsg, code); + } + + mnodeDestroySubMsg(pMsg); + + return TSDB_CODE_MND_ACTION_IN_PROGRESS; + } + dnodeSendRpcMWriteRsp(pMsg, TSDB_CODE_SUCCESS); } return TSDB_CODE_MND_ACTION_IN_PROGRESS; @@ -2478,6 +2623,19 @@ static void mnodeProcessCreateChildTableRsp(SRpcMsg *rpcMsg) { mnodeSendDropChildTableMsg(pMsg, false); rpcMsg->code = TSDB_CODE_SUCCESS; + + if (pMsg->pBatchMasterMsg) { + ++pMsg->pBatchMasterMsg->successed; + if (pMsg->pBatchMasterMsg->successed + pMsg->pBatchMasterMsg->received + >= pMsg->pBatchMasterMsg->expected) { + dnodeSendRpcMWriteRsp(pMsg->pBatchMasterMsg, rpcMsg->code); + } + + mnodeDestroySubMsg(pMsg); + + return; + } + dnodeSendRpcMWriteRsp(pMsg, rpcMsg->code); return; } @@ -2495,6 +2653,19 @@ static void mnodeProcessCreateChildTableRsp(SRpcMsg *rpcMsg) { if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) { pMsg->pTable = NULL; mnodeDestroyChildTable(pTable); + + if (pMsg->pBatchMasterMsg) { + ++pMsg->pBatchMasterMsg->received; + if (pMsg->pBatchMasterMsg->successed + pMsg->pBatchMasterMsg->received + >= pMsg->pBatchMasterMsg->expected) { + dnodeSendRpcMWriteRsp(pMsg->pBatchMasterMsg, code); + } + + mnodeDestroySubMsg(pMsg); + + return; + } + dnodeSendRpcMWriteRsp(pMsg, code); } } else { @@ -2520,6 +2691,19 @@ static void mnodeProcessCreateChildTableRsp(SRpcMsg *rpcMsg) { //Avoid retry again in client rpcMsg->code = TSDB_CODE_MND_VGROUP_NOT_READY; } + + if (pMsg->pBatchMasterMsg) { + ++pMsg->pBatchMasterMsg->received; + if (pMsg->pBatchMasterMsg->successed + pMsg->pBatchMasterMsg->received + >= pMsg->pBatchMasterMsg->expected) { + dnodeSendRpcMWriteRsp(pMsg->pBatchMasterMsg, rpcMsg->code); + } + + mnodeDestroySubMsg(pMsg); + + return; + } + dnodeSendRpcMWriteRsp(pMsg, rpcMsg->code); } } diff --git a/src/mnode/src/mnodeVgroup.c b/src/mnode/src/mnodeVgroup.c index 5b2e89ce16447a6cc011155f82a6db5e203e242b..827be0687dbac338d660d07fefaf563d5c5d0341 100644 --- a/src/mnode/src/mnodeVgroup.c +++ b/src/mnode/src/mnodeVgroup.c @@ -986,6 +986,19 @@ static void mnodeProcessCreateVnodeRsp(SRpcMsg *rpcMsg) { if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) { mnodeMsg->pVgroup = NULL; mnodeDestroyVgroup(pVgroup); + + if (mnodeMsg->pBatchMasterMsg) { + ++mnodeMsg->pBatchMasterMsg->received; + if (mnodeMsg->pBatchMasterMsg->successed + mnodeMsg->pBatchMasterMsg->received + >= 
mnodeMsg->pBatchMasterMsg->expected) { + dnodeSendRpcMWriteRsp(mnodeMsg->pBatchMasterMsg, code); + } + + mnodeDestroySubMsg(mnodeMsg); + + return; + } + dnodeSendRpcMWriteRsp(mnodeMsg, code); } } else { @@ -995,6 +1008,19 @@ static void mnodeProcessCreateVnodeRsp(SRpcMsg *rpcMsg) { .pObj = pVgroup }; sdbDeleteRow(&row); + + if (mnodeMsg->pBatchMasterMsg) { + ++mnodeMsg->pBatchMasterMsg->received; + if (mnodeMsg->pBatchMasterMsg->successed + mnodeMsg->pBatchMasterMsg->received + >= mnodeMsg->pBatchMasterMsg->expected) { + dnodeSendRpcMWriteRsp(mnodeMsg->pBatchMasterMsg, mnodeMsg->code); + } + + mnodeDestroySubMsg(mnodeMsg); + + return; + } + dnodeSendRpcMWriteRsp(mnodeMsg, mnodeMsg->code); } } @@ -1192,4 +1218,4 @@ void mnodeSetVgidVer(int8_t *cver, uint64_t iver) { cver[0] = (int8_t)((int32_t)(iver % 1000000) / 10000); cver[1] = (int8_t)((int32_t)(iver % 100000) / 100); cver[2] = (int8_t)(iver % 100); -} \ No newline at end of file +} diff --git a/src/os/src/detail/osTimer.c b/src/os/src/detail/osTimer.c index 9883a03a0933075616b69800fe6e34a36fc6c746..1d3ba30def9416785d72556ac218bd5a48aead77 100644 --- a/src/os/src/detail/osTimer.c +++ b/src/os/src/detail/osTimer.c @@ -89,12 +89,17 @@ int taosInitTimer(void (*callback)(int), int ms) { if (code != 0) { uError("failed to create timer thread"); return -1; + } else { + uDebug("timer thread:0x%08" PRIx64 " is created", taosGetPthreadId(timerThread)); } + return 0; } void taosUninitTimer() { stopTimer = true; + + uDebug("join timer thread:0x%08" PRIx64, taosGetPthreadId(timerThread)); pthread_join(timerThread, NULL); } diff --git a/src/plugins/http/src/httpResp.c b/src/plugins/http/src/httpResp.c index a41367ad7faa74345b4e3fb9899e815e21c9b4fb..72604e79b710f09e67fde825915db63bc62243d9 100644 --- a/src/plugins/http/src/httpResp.c +++ b/src/plugins/http/src/httpResp.c @@ -50,9 +50,16 @@ static void httpSendErrorRespImp(HttpContext *pContext, int32_t httpCode, char * char head[512] = {0}; char body[512] = {0}; + int8_t httpVersion = 0; + int8_t keepAlive = 0; + if (pContext->parser != NULL) { + httpVersion = pContext->parser->httpVersion; + keepAlive = pContext->parser->keepAlive; + } + int32_t bodyLen = sprintf(body, httpRespTemplate[HTTP_RESPONSE_JSON_ERROR], errNo, desc); - int32_t headLen = sprintf(head, httpRespTemplate[HTTP_RESPONSE_ERROR], httpVersionStr[pContext->parser->httpVersion], - httpCode, httpCodeStr, httpKeepAliveStr[pContext->parser->keepAlive], bodyLen); + int32_t headLen = sprintf(head, httpRespTemplate[HTTP_RESPONSE_ERROR], httpVersionStr[httpVersion], httpCode, + httpCodeStr, httpKeepAliveStr[keepAlive], bodyLen); httpWriteBuf(pContext, head, headLen); httpWriteBuf(pContext, body, bodyLen); @@ -164,9 +171,16 @@ void httpSendSuccResp(HttpContext *pContext, char *desc) { char head[1024] = {0}; char body[1024] = {0}; + int8_t httpVersion = 0; + int8_t keepAlive = 0; + if (pContext->parser != NULL) { + httpVersion = pContext->parser->httpVersion; + keepAlive = pContext->parser->keepAlive; + } + int32_t bodyLen = sprintf(body, httpRespTemplate[HTTP_RESPONSE_JSON_OK], TSDB_CODE_SUCCESS, desc); - int32_t headLen = sprintf(head, httpRespTemplate[HTTP_RESPONSE_OK], httpVersionStr[pContext->parser->httpVersion], - httpKeepAliveStr[pContext->parser->keepAlive], bodyLen); + int32_t headLen = sprintf(head, httpRespTemplate[HTTP_RESPONSE_OK], httpVersionStr[httpVersion], + httpKeepAliveStr[keepAlive], bodyLen); httpWriteBuf(pContext, head, headLen); httpWriteBuf(pContext, body, bodyLen); @@ -177,9 +191,16 @@ void 
httpSendOptionResp(HttpContext *pContext, char *desc) { char head[1024] = {0}; char body[1024] = {0}; + int8_t httpVersion = 0; + int8_t keepAlive = 0; + if (pContext->parser != NULL) { + httpVersion = pContext->parser->httpVersion; + keepAlive = pContext->parser->keepAlive; + } + int32_t bodyLen = sprintf(body, httpRespTemplate[HTTP_RESPONSE_JSON_OK], TSDB_CODE_SUCCESS, desc); - int32_t headLen = sprintf(head, httpRespTemplate[HTTP_RESPONSE_OPTIONS], httpVersionStr[pContext->parser->httpVersion], - httpKeepAliveStr[pContext->parser->keepAlive], bodyLen); + int32_t headLen = sprintf(head, httpRespTemplate[HTTP_RESPONSE_OPTIONS], httpVersionStr[httpVersion], + httpKeepAliveStr[keepAlive], bodyLen); httpWriteBuf(pContext, head, headLen); httpWriteBuf(pContext, body, bodyLen); diff --git a/src/query/inc/qTsbuf.h b/src/query/inc/qTsbuf.h index 90bd64336fdeed91deb68b9b490224a7fb29bc80..5d055782c9b82a1444c97a62d429cc2ba9a53986 100644 --- a/src/query/inc/qTsbuf.h +++ b/src/query/inc/qTsbuf.h @@ -88,6 +88,7 @@ typedef struct STSBuf { STSList tsData; // uncompressed raw ts data uint64_t numOfTotal; bool autoDelete; + bool remainOpen; int32_t tsOrder; // order of timestamp in ts comp buffer STSCursor cur; } STSBuf; diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index 98531e1750cb9da532338b1bd1345abceedd9efe..9fccb0d627fc4574ef90865e94e43c10cbdfc9bf 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -3838,8 +3838,10 @@ static void ts_comp_finalize(SQLFunctionCtx *pCtx) { STSBuf * pTSbuf = pInfo->pTSBuf; tsBufFlush(pTSbuf); - strcpy(pCtx->aOutputBuf, pTSbuf->path); + *(FILE **)pCtx->aOutputBuf = pTSbuf->f; + + pTSbuf->remainOpen = true; tsBufDestroy(pTSbuf); doFinalizer(pCtx); } diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index c75504abecc92cd6ccddd51f0f08f782852f695a..7b544040bfa97d2bab210b2e1833154137f98b40 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -758,7 +758,12 @@ static void doUpdateResultRowIndex(SResultRowInfo*pResultRowInfo, TSKEY lastKey, } } - pResultRowInfo->curIndex = i + 1; // current not closed result object + if (i == pResultRowInfo->size - 1) { + pResultRowInfo->curIndex = i; + } else { + pResultRowInfo->curIndex = i + 1; // current not closed result object + } + pResultRowInfo->prevSKey = pResultRowInfo->pResult[pResultRowInfo->curIndex]->win.skey; } } @@ -1667,7 +1672,7 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS _end: assert(offset >= 0 && tsCols != NULL); - if (prevTs != INT64_MIN) { + if (prevTs != INT64_MIN && prevTs != *(int64_t*)pRuntimeEnv->prevRow[0]) { assert(prevRowIndex >= 0); item->lastKey = prevTs + step; } @@ -2005,6 +2010,7 @@ static void doFreeQueryHandle(SQInfo* pQInfo) { assert(pMemRef->ref == 0 && pMemRef->imem == NULL && pMemRef->mem == NULL); } + static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) { if (pRuntimeEnv->pQuery == NULL) { return; @@ -2016,6 +2022,16 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) { qDebug("QInfo:%p teardown runtime env", pQInfo); cleanupResultRowInfo(&pRuntimeEnv->windowResInfo); + if (isTSCompQuery(pQuery)) { + FILE *f = *(FILE **)pQuery->sdata[0]->data; + + if (f) { + fclose(f); + *(FILE **)pQuery->sdata[0]->data = NULL; + } + } + + if (pRuntimeEnv->pCtx != NULL) { for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { SQLFunctionCtx *pCtx = &pRuntimeEnv->pCtx[i]; @@ -6944,10 +6960,10 @@ static size_t getResultSize(SQInfo *pQInfo, int64_t *numOfRows) { * TODO handle 
the case that the file is too large to send back one time */ if (isTSCompQuery(pQuery) && (*numOfRows) > 0) { - struct stat fstat; - if (stat(pQuery->sdata[0]->data, &fstat) == 0) { - *numOfRows = fstat.st_size; - return fstat.st_size; + struct stat fStat; + if (fstat(fileno(*(FILE **)pQuery->sdata[0]->data), &fStat) == 0) { + *numOfRows = fStat.st_size; + return fStat.st_size; } else { qError("QInfo:%p failed to get file info, path:%s, reason:%s", pQInfo, pQuery->sdata[0]->data, strerror(errno)); return 0; @@ -6963,15 +6979,16 @@ static int32_t doDumpQueryResult(SQInfo *pQInfo, char *data) { // load data from file to msg buffer if (isTSCompQuery(pQuery)) { - int32_t fd = open(pQuery->sdata[0]->data, O_RDONLY, 0666); + + FILE *f = *(FILE **)pQuery->sdata[0]->data; // make sure file exist - if (FD_VALID(fd)) { - uint64_t s = lseek(fd, 0, SEEK_END); + if (f) { + off_t s = lseek(fileno(f), 0, SEEK_END); - qDebug("QInfo:%p ts comp data return, file:%s, size:%"PRId64, pQInfo, pQuery->sdata[0]->data, s); - if (lseek(fd, 0, SEEK_SET) >= 0) { - size_t sz = read(fd, data, (uint32_t) s); + qDebug("QInfo:%p ts comp data return, file:%p, size:%"PRId64, pQInfo, f, s); + if (fseek(f, 0, SEEK_SET) >= 0) { + size_t sz = fread(data, 1, s, f); if(sz < s) { // todo handle error assert(0); } @@ -6979,15 +6996,8 @@ static int32_t doDumpQueryResult(SQInfo *pQInfo, char *data) { UNUSED(s); } - close(fd); - unlink(pQuery->sdata[0]->data); - } else { - // todo return the error code to client and handle invalid fd - qError("QInfo:%p failed to open tmp file to send ts-comp data to client, path:%s, reason:%s", pQInfo, - pQuery->sdata[0]->data, strerror(errno)); - if (fd != -1) { - close(fd); - } + fclose(f); + *(FILE **)pQuery->sdata[0]->data = NULL; } // all data returned, set query over @@ -7635,6 +7645,19 @@ void qQueryMgmtNotifyClosed(void* pQMgmt) { taosCacheRefresh(pQueryMgmt->qinfoPool, queryMgmtKillQueryFn); } +void qQueryMgmtReOpen(void *pQMgmt) { + if (pQMgmt == NULL) { + return; + } + + SQueryMgmt *pQueryMgmt = pQMgmt; + qDebug("vgId:%d, set querymgmt reopen", pQueryMgmt->vgId); + + pthread_mutex_lock(&pQueryMgmt->lock); + pQueryMgmt->closed = false; + pthread_mutex_unlock(&pQueryMgmt->lock); +} + void qCleanupQueryMgmt(void* pQMgmt) { if (pQMgmt == NULL) { return; @@ -7715,4 +7738,4 @@ void** qReleaseQInfo(void* pMgmt, void* pQInfo, bool freeHandle) { taosCacheRelease(pQueryMgmt->qinfoPool, pQInfo, freeHandle); return 0; -} \ No newline at end of file +} diff --git a/src/query/src/qTsbuf.c b/src/query/src/qTsbuf.c index d0c59fe5efe08aa1c9dbe3bbe65db139b074ebee..a5d4690a8e101ccd87985ace36b2abbe1412a2e2 100644 --- a/src/query/src/qTsbuf.c +++ b/src/query/src/qTsbuf.c @@ -19,6 +19,8 @@ STSBuf* tsBufCreate(bool autoDelete, int32_t order) { if (pTSBuf == NULL) { return NULL; } + + pTSBuf->autoDelete = autoDelete; taosGetTmpfilePath("join", pTSBuf->path); pTSBuf->f = fopen(pTSBuf->path, "w+"); @@ -26,6 +28,10 @@ STSBuf* tsBufCreate(bool autoDelete, int32_t order) { free(pTSBuf); return NULL; } + + if (!autoDelete) { + unlink(pTSBuf->path); + } if (NULL == allocResForTSBuf(pTSBuf)) { return NULL; @@ -37,8 +43,7 @@ STSBuf* tsBufCreate(bool autoDelete, int32_t order) { tsBufResetPos(pTSBuf); pTSBuf->cur.order = TSDB_ORDER_ASC; - - pTSBuf->autoDelete = autoDelete; + pTSBuf->tsOrder = order; return pTSBuf; @@ -49,6 +54,8 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) { if (pTSBuf == NULL) { return NULL; } + + pTSBuf->autoDelete = autoDelete; tstrncpy(pTSBuf->path, path, sizeof(pTSBuf->path)); @@ 
-129,7 +136,6 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) { // ascending by default pTSBuf->cur.order = TSDB_ORDER_ASC; - pTSBuf->autoDelete = autoDelete; // tscDebug("create tsBuf from file:%s, fd:%d, size:%d, numOfGroups:%d, autoDelete:%d", pTSBuf->path, fileno(pTSBuf->f), // pTSBuf->fileSize, pTSBuf->numOfGroups, pTSBuf->autoDelete); @@ -147,8 +153,10 @@ void* tsBufDestroy(STSBuf* pTSBuf) { tfree(pTSBuf->pData); tfree(pTSBuf->block.payload); - - fclose(pTSBuf->f); + + if (!pTSBuf->remainOpen) { + fclose(pTSBuf->f); + } if (pTSBuf->autoDelete) { // ("tsBuf %p destroyed, delete tmp file:%s", pTSBuf, pTSBuf->path); diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c index 13d6ed8ed5db31839bffb7b868ff22d9a65486b4..af51733c21fa417c9a8de6d53e15ec92555f35cd 100644 --- a/src/rpc/src/rpcMain.c +++ b/src/rpc/src/rpcMain.c @@ -1567,6 +1567,7 @@ static int rpcCheckAuthentication(SRpcConn *pConn, char *msg, int msgLen) { // for response, if code is auth failure, it shall bypass the auth process code = htonl(pHead->code); if (code == TSDB_CODE_RPC_INVALID_TIME_STAMP || code == TSDB_CODE_RPC_AUTH_FAILURE || + code == TSDB_CODE_RPC_INVALID_VERSION || code == TSDB_CODE_RPC_AUTH_REQUIRED || code == TSDB_CODE_MND_INVALID_USER || code == TSDB_CODE_RPC_NOT_READY) { pHead->msgLen = (int32_t)htonl((uint32_t)pHead->msgLen); // tTrace("%s, dont check authentication since code is:0x%x", pConn->info, code); diff --git a/src/sync/src/syncMain.c b/src/sync/src/syncMain.c index 436f4de0988aa87836172b8c0284ca767d27c5d8..db100250a8570ec130f89856ce8fa55d493aeb13 100644 --- a/src/sync/src/syncMain.c +++ b/src/sync/src/syncMain.c @@ -375,6 +375,8 @@ int32_t syncReconfig(int64_t rid, const SSyncCfg *pNewCfg) { } int32_t syncForwardToPeer(int64_t rid, void *data, void *mhandle, int32_t qtype) { + if (rid <= 0) return 0; + SSyncNode *pNode = syncAcquireNode(rid); if (pNode == NULL) return 0; diff --git a/src/sync/src/syncRestore.c b/src/sync/src/syncRestore.c index a5e268cdd262ee1cd4bae6433de9c7c764e6561a..78520c660875ccd25d42857b89359fbd15f33b35 100644 --- a/src/sync/src/syncRestore.c +++ b/src/sync/src/syncRestore.c @@ -90,7 +90,7 @@ static int32_t syncRestoreFile(SSyncPeer *pPeer, uint64_t *fversion) { break; } - sDebug("%s, file:%s info is received from master, index:%d size:%" PRId64 " fver:%" PRIu64 " magic:%d", pPeer->id, + sDebug("%s, file:%s info is received from master, index:%d size:%" PRId64 " fver:%" PRIu64 " magic:%u", pPeer->id, minfo.name, minfo.index, minfo.size, minfo.fversion, minfo.magic); // remove extra files on slave between the current and last index @@ -100,13 +100,13 @@ static int32_t syncRestoreFile(SSyncPeer *pPeer, uint64_t *fversion) { // check the file info sinfo = minfo; sinfo.magic = (*pNode->getFileInfo)(pNode->vgId, sinfo.name, &sinfo.index, TAOS_SYNC_MAX_INDEX, &sinfo.size, &sinfo.fversion); - sDebug("%s, local file:%s info, index:%d size:%" PRId64 " fver:%" PRIu64 " magic:%d", pPeer->id, sinfo.name, + sDebug("%s, local file:%s info, index:%d size:%" PRId64 " fver:%" PRIu64 " magic:%u", pPeer->id, sinfo.name, sinfo.index, sinfo.size, sinfo.fversion, sinfo.magic); // if file not there or magic is not the same, file shall be synced memset(&fileAck, 0, sizeof(SFileAck)); syncBuildFileAck(&fileAck, pNode->vgId); - fileAck.sync = (sinfo.magic != minfo.magic || sinfo.name[0] == 0) ? 1 : 0; + fileAck.sync = (sinfo.magic != minfo.magic || sinfo.size != minfo.size || sinfo.name[0] == 0) ? 
1 : 0; // send file ack ret = taosWriteMsg(pPeer->syncFd, &fileAck, sizeof(SFileAck)); @@ -195,7 +195,11 @@ static int32_t syncRestoreWal(SSyncPeer *pPeer, uint64_t *wver) { } lastVer = pHead->version; - (*pNode->writeToCache)(pNode->vgId, pHead, TAOS_QTYPE_WAL, NULL); + ret = (*pNode->writeToCache)(pNode->vgId, pHead, TAOS_QTYPE_WAL, NULL); + if (ret != 0) { + sError("%s, failed to restore record since %s, hver:%" PRIu64, pPeer->id, tstrerror(ret), pHead->version); + break; + } } if (code < 0) { diff --git a/src/sync/src/syncRetrieve.c b/src/sync/src/syncRetrieve.c index b3be1ace39586867f83cac650135ab0ced01afe4..153886102e807b9fe161585705691ff92b4186a0 100644 --- a/src/sync/src/syncRetrieve.c +++ b/src/sync/src/syncRetrieve.c @@ -104,7 +104,7 @@ static int32_t syncRetrieveFile(SSyncPeer *pPeer) { fileInfo.magic = (*pNode->getFileInfo)(pNode->vgId, fileInfo.name, &fileInfo.index, TAOS_SYNC_MAX_INDEX, &fileInfo.size, &fileInfo.fversion); syncBuildFileInfo(&fileInfo, pNode->vgId); - sDebug("%s, file:%s info is sent, index:%d size:%" PRId64 " fver:%" PRIu64 " magic:%d", pPeer->id, fileInfo.name, + sDebug("%s, file:%s info is sent, index:%d size:%" PRId64 " fver:%" PRIu64 " magic:%u", pPeer->id, fileInfo.name, fileInfo.index, fileInfo.size, fileInfo.fversion, fileInfo.magic); // send the file info @@ -143,10 +143,10 @@ static int32_t syncRetrieveFile(SSyncPeer *pPeer) { // if sync is not required, continue if (fileAck.sync == 0) { fileInfo.index++; - sDebug("%s, %s is the same", pPeer->id, fileInfo.name); + sDebug("%s, %s is the same, fver:%" PRIu64, pPeer->id, fileInfo.name, fileInfo.fversion); continue; } else { - sDebug("%s, %s will be sent", pPeer->id, fileInfo.name); + sDebug("%s, %s will be sent, fver:%" PRIu64, pPeer->id, fileInfo.name, fileInfo.fversion); } // get the full path to file @@ -328,7 +328,8 @@ static int32_t syncProcessLastWal(SSyncPeer *pPeer, char *wname, int64_t index) // if bytes > 0, file is updated, or fversion is not reached but file still open, read again once = 1; offset += bytes; - sDebug("%s, continue retrieve last wal, bytes:%d offset:%" PRId64, pPeer->id, bytes, offset); + sDebug("%s, continue retrieve last wal, bytes:%d offset:%" PRId64 " sver:%" PRIu64 " fver:%" PRIu64, pPeer->id, + bytes, offset, pPeer->sversion, fversion); } return -1; @@ -503,9 +504,10 @@ void *syncRetrieveData(void *param) { taosClose(pPeer->syncFd); // The ref is obtained in both the create thread and the current thread, so it is released twice + sInfo("%s, sync retrieve data over, sstatus:%s", pPeer->id, syncStatus[pPeer->sstatus]); + syncReleasePeer(pPeer); syncReleasePeer(pPeer); - sInfo("%s, sync retrieve data over, sstatus:%s", pPeer->id, syncStatus[pPeer->sstatus]); return NULL; } diff --git a/src/vnode/inc/vnodeRead.h b/src/vnode/inc/vnodeRead.h index f2953d79f4d07c3dac821e9a086d86c53647d9c7..f5375d6ab0c19277e2c39a987fd4428fae1885e1 100644 --- a/src/vnode/inc/vnodeRead.h +++ b/src/vnode/inc/vnodeRead.h @@ -27,6 +27,7 @@ void vnodeCleanupRead(void); int32_t vnodeWriteToRQueue(void *pVnode, void *pCont, int32_t contLen, int8_t qtype, void *rparam); void vnodeFreeFromRQueue(void *pVnode, SVReadMsg *pRead); int32_t vnodeProcessRead(void *pVnode, SVReadMsg *pRead); +void vnodeWaitReadCompleted(void *pVnode); #ifdef __cplusplus } diff --git a/src/vnode/inc/vnodeStatus.h b/src/vnode/inc/vnodeStatus.h index 00ac47df65fba91b9d7ef4b92ab5210de3652330..910a6d71b201fcdc9fbc1daa99fb57d0227d6093 100644 --- a/src/vnode/inc/vnodeStatus.h +++ b/src/vnode/inc/vnodeStatus.h @@ -37,6 +37,7 @@ bool 
vnodeSetResetStatus(SVnodeObj* pVnode); bool vnodeInInitStatus(SVnodeObj* pVnode); bool vnodeInReadyStatus(SVnodeObj* pVnode); +bool vnodeInReadyOrUpdatingStatus(SVnodeObj* pVnode); bool vnodeInClosingStatus(SVnodeObj* pVnode); bool vnodeInResetStatus(SVnodeObj* pVnode); diff --git a/src/vnode/inc/vnodeWrite.h b/src/vnode/inc/vnodeWrite.h index 8b3f0fdb58c8a510bcfc6da3aa36adb85297efca..5238e45b81fc7955e592970fd6634199940470e0 100644 --- a/src/vnode/inc/vnodeWrite.h +++ b/src/vnode/inc/vnodeWrite.h @@ -27,6 +27,7 @@ void vnodeCleanupWrite(void); int32_t vnodeWriteToWQueue(void *pVnode, void *pHead, int32_t qtype, void *pRpcMsg); void vnodeFreeFromWQueue(void *pVnode, SVWriteMsg *pWrite); int32_t vnodeProcessWrite(void *pVnode, void *pHead, int32_t qtype, void *pRspRet); +void vnodeWaitWriteCompleted(void *pVnode); #ifdef __cplusplus } diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c index 8f44a248ced6157195955fdf79ec713ea6f25072..eb43fba0798085d84c793a145b082325e6d56a15 100644 --- a/src/vnode/src/vnodeMain.c +++ b/src/vnode/src/vnodeMain.c @@ -153,6 +153,11 @@ static int32_t vnodeAlterImp(SVnodeObj *pVnode, SCreateVnodeMsg *pVnodeCfg) { int32_t vnodeAlter(void *vparam, SCreateVnodeMsg *pVnodeCfg) { SVnodeObj *pVnode = vparam; + if (pVnode->dbCfgVersion == pVnodeCfg->cfg.dbCfgVersion && pVnode->vgCfgVersion == pVnodeCfg->cfg.vgCfgVersion) { + vDebug("vgId:%d, dbCfgVersion:%d and vgCfgVersion:%d not change", pVnode->vgId, pVnode->dbCfgVersion, + pVnode->vgCfgVersion); + return TSDB_CODE_SUCCESS; + } // vnode in non-ready state and still needs to return success instead of TSDB_CODE_VND_INVALID_STATUS // dbCfgVersion can be corrected by status msg @@ -411,15 +416,12 @@ void vnodeDestroy(SVnodeObj *pVnode) { } void vnodeCleanUp(SVnodeObj *pVnode) { - if (!vnodeInInitStatus(pVnode)) { - // it may be in updateing or reset state, then it shall wait - int32_t i = 0; - while (!vnodeSetClosingStatus(pVnode)) { - if (++i % 1000 == 0) { - sched_yield(); - } - } - } + vDebug("vgId:%d, vnode will cleanup, refCount:%d pVnode:%p", pVnode->vgId, pVnode->refCount, pVnode); + + vnodeSetClosingStatus(pVnode); + + // release local resources only after cutting off outside connections + qQueryMgmtNotifyClosed(pVnode->qMgmt); // stop replication module if (pVnode->sync > 0) { @@ -428,10 +430,7 @@ void vnodeCleanUp(SVnodeObj *pVnode) { syncStop(sync); } - vDebug("vgId:%d, vnode will cleanup, refCount:%d pVnode:%p", pVnode->vgId, pVnode->refCount, pVnode); - - // release local resources only after cutting off outside connections - qQueryMgmtNotifyClosed(pVnode->qMgmt); + vDebug("vgId:%d, vnode is cleaned, refCount:%d pVnode:%p", pVnode->vgId, pVnode->refCount, pVnode); vnodeRelease(pVnode); } diff --git a/src/vnode/src/vnodeRead.c b/src/vnode/src/vnodeRead.c index a972ffec1cb5279b191dc1256b85c28e0e5cd9eb..c864bc995bb58e81535111b1f2c7654699578930 100644 --- a/src/vnode/src/vnodeRead.c +++ b/src/vnode/src/vnodeRead.c @@ -88,22 +88,15 @@ void vnodeFreeFromRQueue(void *vparam, SVReadMsg *pRead) { vnodeRelease(pVnode); } -int32_t vnodeWriteToRQueue(void *vparam, void *pCont, int32_t contLen, int8_t qtype, void *rparam) { - SVnodeObj *pVnode = vparam; - - if (qtype == TAOS_QTYPE_RPC || qtype == TAOS_QTYPE_QUERY) { - int32_t code = vnodeCheckRead(pVnode); - if (code != TSDB_CODE_SUCCESS) return code; - } - +static SVReadMsg *vnodeBuildVReadMsg(SVnodeObj *pVnode, void *pCont, int32_t contLen, int8_t qtype, SRpcMsg *pRpcMsg) { int32_t size = sizeof(SVReadMsg) + contLen; SVReadMsg *pRead = 
taosAllocateQitem(size); if (pRead == NULL) { - return TSDB_CODE_VND_OUT_OF_MEMORY; + terrno = TSDB_CODE_VND_OUT_OF_MEMORY; + return NULL; } - if (rparam != NULL) { - SRpcMsg *pRpcMsg = rparam; + if (pRpcMsg != NULL) { pRead->rpcHandle = pRpcMsg->handle; pRead->rpcAhandle = pRpcMsg->ahandle; pRead->msgType = pRpcMsg->msgType; @@ -119,13 +112,35 @@ int32_t vnodeWriteToRQueue(void *vparam, void *pCont, int32_t contLen, int8_t qt pRead->qtype = qtype; atomic_add_fetch_32(&pVnode->refCount, 1); + + return pRead; +} + +int32_t vnodeWriteToRQueue(void *vparam, void *pCont, int32_t contLen, int8_t qtype, void *rparam) { + SVReadMsg *pRead = vnodeBuildVReadMsg(vparam, pCont, contLen, qtype, rparam); + if (pRead == NULL) { + assert(terrno != 0); + return terrno; + } + + SVnodeObj *pVnode = vparam; + + int32_t code = vnodeCheckRead(pVnode); + if (code != TSDB_CODE_SUCCESS) { + taosFreeQitem(pRead); + vnodeRelease(pVnode); + return code; + } + atomic_add_fetch_32(&pVnode->queuedRMsg, 1); - if (pRead->code == TSDB_CODE_RPC_NETWORK_UNAVAIL || pRead->msgType == TSDB_MSG_TYPE_FETCH) { - vTrace("vgId:%d, write into vfetch queue, refCount:%d queued:%d", pVnode->vgId, pVnode->refCount, pVnode->queuedRMsg); + if (pRead->code == TSDB_CODE_RPC_NETWORK_UNAVAIL || pRead->msgType == TSDB_MSG_TYPE_FETCH) { + vTrace("vgId:%d, write into vfetch queue, refCount:%d queued:%d", pVnode->vgId, pVnode->refCount, + pVnode->queuedRMsg); return taosWriteQitem(pVnode->fqueue, qtype, pRead); } else { - vTrace("vgId:%d, write into vquery queue, refCount:%d queued:%d", pVnode->vgId, pVnode->refCount, pVnode->queuedRMsg); + vTrace("vgId:%d, write into vquery queue, refCount:%d queued:%d", pVnode->vgId, pVnode->refCount, + pVnode->queuedRMsg); return taosWriteQitem(pVnode->qqueue, qtype, pRead); } } @@ -420,3 +435,5 @@ int32_t vnodeNotifyCurrentQhandle(void *handle, void *qhandle, int32_t vgId) { vTrace("QInfo:%p register qhandle to connect:%p", qhandle, handle); return rpcReportProgress(handle, (char *)pMsg, sizeof(SRetrieveTableMsg)); } + +void vnodeWaitReadCompleted(void *pVnode) {} \ No newline at end of file diff --git a/src/vnode/src/vnodeStatus.c b/src/vnode/src/vnodeStatus.c index d09a6a86631837db6799aca5b4df87ae22e07853..0bff062f09710e36c8bc94b7808d38d0e54b56c7 100644 --- a/src/vnode/src/vnodeStatus.c +++ b/src/vnode/src/vnodeStatus.c @@ -15,6 +15,8 @@ #define _DEFAULT_SOURCE #include "os.h" +#include "taosmsg.h" +#include "query.h" #include "vnodeStatus.h" char* vnodeStatus[] = { @@ -44,11 +46,13 @@ bool vnodeSetReadyStatus(SVnodeObj* pVnode) { vDebug("vgId:%d, cannot set status:ready, old:%s", pVnode->vgId, vnodeStatus[pVnode->status]); } + qQueryMgmtReOpen(pVnode->qMgmt); + pthread_mutex_unlock(&pVnode->statusMutex); return set; } -bool vnodeSetClosingStatus(SVnodeObj* pVnode) { +static bool vnodeSetClosingStatusImp(SVnodeObj* pVnode) { bool set = false; pthread_mutex_lock(&pVnode->statusMutex); @@ -63,6 +67,20 @@ bool vnodeSetClosingStatus(SVnodeObj* pVnode) { return set; } +bool vnodeSetClosingStatus(SVnodeObj* pVnode) { + if (!vnodeInInitStatus(pVnode)) { + // it may be in updating or reset state, then it shall wait + int32_t i = 0; + while (!vnodeSetClosingStatusImp(pVnode)) { + if (++i % 1000 == 0) { + sched_yield(); + } + } + } + + return true; +} + bool vnodeSetUpdatingStatus(SVnodeObj* pVnode) { bool set = false; pthread_mutex_lock(&pVnode->statusMutex); @@ -117,6 +135,18 @@ bool vnodeInReadyStatus(SVnodeObj* pVnode) { return in; } +bool vnodeInReadyOrUpdatingStatus(SVnodeObj* pVnode) { + bool in = false; + 
pthread_mutex_lock(&pVnode->statusMutex); + + if (pVnode->status == TAOS_VN_STATUS_READY || pVnode->status == TAOS_VN_STATUS_UPDATING) { + in = true; + } + + pthread_mutex_unlock(&pVnode->statusMutex); + return in; +} + bool vnodeInClosingStatus(SVnodeObj* pVnode) { bool in = false; pthread_mutex_lock(&pVnode->statusMutex); diff --git a/src/vnode/src/vnodeWrite.c b/src/vnode/src/vnodeWrite.c index a3a88e8b7b143684f047ac395b91e830c2884fe8..5c2e871eb646a006484bcdc34995bda70a3ede8d 100644 --- a/src/vnode/src/vnodeWrite.c +++ b/src/vnode/src/vnodeWrite.c @@ -90,7 +90,10 @@ int32_t vnodeProcessWrite(void *vparam, void *wparam, int32_t qtype, void *rpara // write into WAL code = walWrite(pVnode->wal, pHead); - if (code < 0) return code; + if (code < 0) { + vError("vgId:%d, hver:%" PRIu64 " vver:%" PRIu64 " code:0x%x", pVnode->vgId, pHead->version, pVnode->version, code); + return code; + } pVnode->version = pHead->version; @@ -101,8 +104,7 @@ int32_t vnodeProcessWrite(void *vparam, void *wparam, int32_t qtype, void *rpara return syncCode; } -static int32_t vnodeCheckWrite(void *vparam) { - SVnodeObj *pVnode = vparam; +static int32_t vnodeCheckWrite(SVnodeObj *pVnode) { if (!(pVnode->accessState & TSDB_VN_WRITE_ACCCESS)) { vDebug("vgId:%d, no write auth, refCount:%d pVnode:%p", pVnode->vgId, pVnode->refCount, pVnode); return TSDB_CODE_VND_NO_WRITE_AUTH; @@ -121,12 +123,6 @@ static int32_t vnodeCheckWrite(void *vparam) { return TSDB_CODE_APP_NOT_READY; } - if (vnodeInClosingStatus(pVnode)) { - vDebug("vgId:%d, vnode status is %s, refCount:%d pVnode:%p", pVnode->vgId, vnodeStatus[pVnode->status], - pVnode->refCount, pVnode); - return TSDB_CODE_APP_NOT_READY; - } - if (pVnode->isFull) { vDebug("vgId:%d, vnode is full, refCount:%d", pVnode->vgId, pVnode->refCount); return TSDB_CODE_VND_IS_FULL; @@ -216,29 +212,21 @@ static int32_t vnodeProcessUpdateTagValMsg(SVnodeObj *pVnode, void *pCont, SRspR return TSDB_CODE_SUCCESS; } -int32_t vnodeWriteToWQueue(void *vparam, void *wparam, int32_t qtype, void *rparam) { - SVnodeObj *pVnode = vparam; - SWalHead * pHead = wparam; - int32_t code = 0; - - if (qtype == TAOS_QTYPE_RPC) { - code = vnodeCheckWrite(pVnode); - if (code != TSDB_CODE_SUCCESS) return code; - } - +static SVWriteMsg *vnodeBuildVWriteMsg(SVnodeObj *pVnode, SWalHead *pHead, int32_t qtype, SRpcMsg *pRpcMsg) { if (pHead->len > TSDB_MAX_WAL_SIZE) { vError("vgId:%d, wal len:%d exceeds limit, hver:%" PRIu64, pVnode->vgId, pHead->len, pHead->version); - return TSDB_CODE_WAL_SIZE_LIMIT; + terrno = TSDB_CODE_WAL_SIZE_LIMIT; + return NULL; } int32_t size = sizeof(SVWriteMsg) + sizeof(SWalHead) + pHead->len; SVWriteMsg *pWrite = taosAllocateQitem(size); if (pWrite == NULL) { - return TSDB_CODE_VND_OUT_OF_MEMORY; + terrno = TSDB_CODE_VND_OUT_OF_MEMORY; + return NULL; } - if (rparam != NULL) { - SRpcMsg *pRpcMsg = rparam; + if (pRpcMsg != NULL) { pWrite->rpcMsg = *pRpcMsg; } @@ -248,6 +236,30 @@ int32_t vnodeWriteToWQueue(void *vparam, void *wparam, int32_t qtype, void *rpar atomic_add_fetch_32(&pVnode->refCount, 1); + return pWrite; +} + +static int32_t vnodeWriteToWQueueImp(SVWriteMsg *pWrite) { + SVnodeObj *pVnode = pWrite->pVnode; + + if (pWrite->qtype == TAOS_QTYPE_RPC) { + int32_t code = vnodeCheckWrite(pVnode); + if (code != TSDB_CODE_SUCCESS) { + vError("vgId:%d, failed to write into vwqueue since %s", pVnode->vgId, tstrerror(code)); + taosFreeQitem(pWrite); + vnodeRelease(pVnode); + return code; + } + } + + if (!vnodeInReadyOrUpdatingStatus(pVnode)) { + vError("vgId:%d, failed to write into 
vwqueue, vstatus is %s, refCount:%d pVnode:%p", pVnode->vgId, + vnodeStatus[pVnode->status], pVnode->refCount, pVnode); + taosFreeQitem(pWrite); + vnodeRelease(pVnode); + return TSDB_CODE_APP_NOT_READY; + } + int32_t queued = atomic_add_fetch_32(&pVnode->queuedWMsg, 1); if (queued > MAX_QUEUED_MSG_NUM) { int32_t ms = (queued / MAX_QUEUED_MSG_NUM) * 10 + 3; @@ -256,15 +268,25 @@ int32_t vnodeWriteToWQueue(void *vparam, void *wparam, int32_t qtype, void *rpar taosMsleep(ms); } - code = vnodePerformFlowCtrl(pWrite); - if (code != 0) return 0; - vTrace("vgId:%d, write into vwqueue, refCount:%d queued:%d", pVnode->vgId, pVnode->refCount, pVnode->queuedWMsg); - taosWriteQitem(pVnode->wqueue, qtype, pWrite); + taosWriteQitem(pVnode->wqueue, pWrite->qtype, pWrite); return TSDB_CODE_SUCCESS; } +int32_t vnodeWriteToWQueue(void *vparam, void *wparam, int32_t qtype, void *rparam) { + SVWriteMsg *pWrite = vnodeBuildVWriteMsg(vparam, wparam, qtype, rparam); + if (pWrite == NULL) { + assert(terrno != 0); + return terrno; + } + + int32_t code = vnodePerformFlowCtrl(pWrite); + if (code != 0) return 0; + + return vnodeWriteToWQueueImp(pWrite); +} + void vnodeFreeFromWQueue(void *vparam, SVWriteMsg *pWrite) { SVnodeObj *pVnode = vparam; @@ -294,7 +316,10 @@ static void vnodeFlowCtrlMsgToWQueue(void *param, void *tmrId) { vDebug("vgId:%d, msg:%p, write into vwqueue after flowctrl, retry:%d", pVnode->vgId, pWrite, pWrite->processedCount); pWrite->processedCount = 0; - taosWriteQitem(pVnode->wqueue, pWrite->qtype, pWrite); + code = vnodeWriteToWQueueImp(pWrite); + if (code != 0) { + dnodeSendRpcVWriteRsp(pWrite->pVnode, pWrite, code); + } } } } @@ -319,3 +344,5 @@ static int32_t vnodePerformFlowCtrl(SVWriteMsg *pWrite) { return TSDB_CODE_VND_ACTION_IN_PROGRESS; } } + +void vnodeWaitWriteCompleted(void *pVnode) {} \ No newline at end of file diff --git a/tests/examples/C#/taosdemo/README.md b/tests/examples/C#/taosdemo/README.md new file mode 100644 index 0000000000000000000000000000000000000000..82a8dc674af9ecd83d6e777a0e2739cabf598536 --- /dev/null +++ b/tests/examples/C#/taosdemo/README.md @@ -0,0 +1,39 @@ +install build environment +=== +yum/apt install mono-complete + +build C# version taosdemo +=== +mcs -out:taosdemo *.cs + +run C# version taosdemo +=== +Usage: mono taosdemo.exe [OPTION...] + + --help Show usage. + + -h host, The host to connect to TDengine. Default is localhost. + -p port, The TCP/IP port number to use for the connection. Default is 0. + -u user, The user name to use when connecting to the server. Default is 'root'. + -P password, The password to use when connecting to the server. Default is 'taosdata'. + -d database, Destination database. Default is 'test'. + -a replica, Set the replica parameters of the database, Default 1, min: 1, max: 5. + -m table_prefix, Table prefix name. Default is 't'. + -s sql file, The select sql file. + -M stable, Use super table. + -o outputfile, Direct output to the named file. Default is './output.txt'. + -q query_mode, Query mode--0: SYNC, 1: ASYNC. Default is SYNC. + -b type_of_cols, data_type of columns: 'INT', 'TINYINT', 'SMALLINT', 'BIGINT', 'FLOAT', 'DOUBLE', 'BINARY'. Default is 'INT'. + -w length_of_binary, The length of data_type 'BINARY'. Only applicable when type of cols is 'BINARY'. Default is 8 + -l num_of_cols_per_record, The number of columns per record. Default is 3. + -T num_of_threads, The number of threads. Default is 10. + -r num_of_records_per_req, The number of records per request. Default is 1000. 
+ -t num_of_tables, The number of tables. Default is 1. + -n num_of_records_per_table, The number of records per table. Default is 1. + -c config_directory, Configuration directory. Default is '/etc/taos/'. + -x flag, Insert only flag. + -O order, Insert mode--0: In order, 1: Out of order. Default is in order. + -R rate, Out of order data's rate--if order=1 Default 10, min: 0, max: 50. + -D Delete data methods 0: don't delete, 1: delete by table, 2: delete by stable, 3: delete by database. + -v Print verbose output + -y Skip read key for continous test, default is not skip diff --git a/tests/examples/C#/taosdemo/TDengineDriver.cs b/tests/examples/C#/taosdemo/TDengineDriver.cs new file mode 100644 index 0000000000000000000000000000000000000000..205269501d376a4753b3aedbfa8d512b2df31600 --- /dev/null +++ b/tests/examples/C#/taosdemo/TDengineDriver.cs @@ -0,0 +1,155 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +using System; +using System.Collections.Generic; +using System.Runtime.InteropServices; + +namespace TDengineDriver +{ + enum TDengineDataType + { + TSDB_DATA_TYPE_NULL = 0, // 1 bytes + TSDB_DATA_TYPE_BOOL = 1, // 1 bytes + TSDB_DATA_TYPE_TINYINT = 2, // 1 bytes + TSDB_DATA_TYPE_SMALLINT = 3, // 2 bytes + TSDB_DATA_TYPE_INT = 4, // 4 bytes + TSDB_DATA_TYPE_BIGINT = 5, // 8 bytes + TSDB_DATA_TYPE_FLOAT = 6, // 4 bytes + TSDB_DATA_TYPE_DOUBLE = 7, // 8 bytes + TSDB_DATA_TYPE_BINARY = 8, // string + TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes + TSDB_DATA_TYPE_NCHAR = 10 // unicode string + } + + enum TDengineInitOption + { + TSDB_OPTION_LOCALE = 0, + TSDB_OPTION_CHARSET = 1, + TSDB_OPTION_TIMEZONE = 2, + TDDB_OPTION_CONFIGDIR = 3, + TDDB_OPTION_SHELL_ACTIVITY_TIMER = 4 + } + + class TDengineMeta + { + public string name; + public short size; + public byte type; + public string TypeName() + { + switch ((TDengineDataType)type) + { + case TDengineDataType.TSDB_DATA_TYPE_BOOL: + return "BOOLEAN"; + case TDengineDataType.TSDB_DATA_TYPE_TINYINT: + return "BYTE"; + case TDengineDataType.TSDB_DATA_TYPE_SMALLINT: + return "SHORT"; + case TDengineDataType.TSDB_DATA_TYPE_INT: + return "INT"; + case TDengineDataType.TSDB_DATA_TYPE_BIGINT: + return "LONG"; + case TDengineDataType.TSDB_DATA_TYPE_FLOAT: + return "FLOAT"; + case TDengineDataType.TSDB_DATA_TYPE_DOUBLE: + return "DOUBLE"; + case TDengineDataType.TSDB_DATA_TYPE_BINARY: + return "STRING"; + case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP: + return "TIMESTAMP"; + case TDengineDataType.TSDB_DATA_TYPE_NCHAR: + return "NCHAR"; + default: + return "undefine"; + } + } + } + + class TDengine + { + public const int TSDB_CODE_SUCCESS = 0; + + [DllImport("taos", EntryPoint = "taos_init", CallingConvention = CallingConvention.Cdecl)] + static extern public void Init(); + + [DllImport("taos", EntryPoint = "taos_cleanup", CallingConvention = CallingConvention.Cdecl)] + static extern public void Cleanup(); + + [DllImport("taos", EntryPoint = "taos_options", CallingConvention = CallingConvention.Cdecl)] + static extern 
public void Options(int option, string value); + + [DllImport("taos", EntryPoint = "taos_connect", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr Connect(string ip, string user, string password, string db, short port); + + [DllImport("taos", EntryPoint = "taos_errstr", CallingConvention = CallingConvention.Cdecl)] + static extern private IntPtr taos_errstr(IntPtr res); + static public string Error(IntPtr res) + { + IntPtr errPtr = taos_errstr(res); + return Marshal.PtrToStringAnsi(errPtr); + } + + [DllImport("taos", EntryPoint = "taos_errno", CallingConvention = CallingConvention.Cdecl)] + static extern public int ErrorNo(IntPtr res); + + [DllImport("taos", EntryPoint = "taos_query", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr Query(IntPtr conn, string sqlstr); + + [DllImport("taos", EntryPoint = "taos_affected_rows", CallingConvention = CallingConvention.Cdecl)] + static extern public int AffectRows(IntPtr res); + + [DllImport("taos", EntryPoint = "taos_field_count", CallingConvention = CallingConvention.Cdecl)] + static extern public int FieldCount(IntPtr res); + + [DllImport("taos", EntryPoint = "taos_fetch_fields", CallingConvention = CallingConvention.Cdecl)] + static extern private IntPtr taos_fetch_fields(IntPtr res); + static public List FetchFields(IntPtr res) + { + const int fieldSize = 68; + + List metas = new List(); + if (res == IntPtr.Zero) + { + return metas; + } + + int fieldCount = FieldCount(res); + IntPtr fieldsPtr = taos_fetch_fields(res); + + for (int i = 0; i < fieldCount; ++i) + { + int offset = i * fieldSize; + + TDengineMeta meta = new TDengineMeta(); + meta.name = Marshal.PtrToStringAnsi(fieldsPtr + offset); + meta.type = Marshal.ReadByte(fieldsPtr + offset + 65); + meta.size = Marshal.ReadInt16(fieldsPtr + offset + 66); + metas.Add(meta); + } + + return metas; + } + + [DllImport("taos", EntryPoint = "taos_fetch_row", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr FetchRows(IntPtr res); + + [DllImport("taos", EntryPoint = "taos_free_result", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr FreeResult(IntPtr res); + + [DllImport("taos", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)] + static extern public int Close(IntPtr taos); + } +} diff --git a/tests/examples/C#/taosdemo/taosdemo.cs b/tests/examples/C#/taosdemo/taosdemo.cs new file mode 100644 index 0000000000000000000000000000000000000000..7e7c18db26c49c6c7a349fe497d6c9c792756a0c --- /dev/null +++ b/tests/examples/C#/taosdemo/taosdemo.cs @@ -0,0 +1,782 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +using System; +using System.Text; +using System.Collections.Generic; +using System.Runtime.InteropServices; +using System.Collections; +using System.Threading; +using System.Diagnostics; + +namespace TDengineDriver +{ + class TDengineTest + { + //connect parameters + private string host = "127.0.0.1"; + private string configDir = "C:/TDengine/cfg"; + private string user = "root"; + private string password = "taosdata"; + private short port = 0; + + //sql parameters + private string dbName = "db"; + private string stableName = "st"; + private string tablePrefix = "t"; + + private bool isInsertOnly = false; + private int queryMode = 1; + + private long recordsPerTable = 1; + private int recordsPerRequest = 1; + private int colsPerRecord = 3; + private long batchRows = 1000; + private long numOfTables = 1; + private short replica = 1; + + private IntPtr conn = IntPtr.Zero; + // private long rowsInserted = 0; + private bool useStable = false; + private short methodOfDelete = 0; + private long numOfThreads = 1; + private long rateOfOutorder = 0; + private bool order = true; + private bool skipReadKey = false; + private bool verbose = false; + + + static void PrintHelp(String[] argv) + { + for (int i = 0; i < argv.Length; ++i) + { + if ("--help" == argv[i]) + { + Console.WriteLine("Usage: mono taosdemo.exe [OPTION...]"); + Console.WriteLine(""); + string indent = " "; + Console.WriteLine("{0}{1}", indent, "--help Show usage."); + Console.WriteLine(""); + Console.Write("{0}{1}", indent, "-h"); + Console.Write("{0}{1}{2}\n", indent, indent, "host, The host to connect to TDengine. Default is localhost."); + Console.Write("{0}{1}", indent, "-p"); + Console.Write("{0}{1}{2}\n", indent, indent, "port, The TCP/IP port number to use for the connection. Default is 0."); + Console.Write("{0}{1}", indent, "-u"); + Console.Write("{0}{1}{2}\n", indent, indent, "user, The user name to use when connecting to the server. Default is 'root'."); + Console.Write("{0}{1}", indent, "-P"); + Console.Write("{0}{1}{2}\n", indent, indent, "password, The password to use when connecting to the server. Default is 'taosdata'."); + Console.Write("{0}{1}", indent, "-d"); + Console.Write("{0}{1}{2}\n", indent, indent, "database, Destination database. Default is 'test'."); + Console.Write("{0}{1}", indent, "-a"); + Console.Write("{0}{1}{2}\n", indent, indent, "replica, Set the replica parameters of the database, Default 1, min: 1, max: 5."); + Console.Write("{0}{1}", indent, "-m"); + Console.Write("{0}{1}{2}\n", indent, indent, "table_prefix, Table prefix name. Default is 't'."); + Console.Write("{0}{1}", indent, "-s"); + Console.Write("{0}{1}{2}\n", indent, indent, "sql file, The select sql file."); + Console.Write("{0}{1}", indent, "-M"); + Console.Write("{0}{1}{2}\n", indent, indent, "stable, Use super table."); + Console.Write("{0}{1}", indent, "-o"); + Console.Write("{0}{1}{2}\n", indent, indent, "outputfile, Direct output to the named file. Default is './output.txt'."); + Console.Write("{0}{1}", indent, "-q"); + Console.Write("{0}{1}{2}\n", indent, indent, "query_mode, Query mode--0: SYNC, 1: ASYNC. Default is SYNC."); + Console.Write("{0}{1}", indent, "-b"); + Console.Write("{0}{1}{2}\n", indent, indent, "type_of_cols, data_type of columns: 'INT', 'TINYINT', 'SMALLINT', 'BIGINT', 'FLOAT', 'DOUBLE', 'BINARY'. Default is 'INT'."); + Console.Write("{0}{1}", indent, "-w"); + Console.Write("{0}{1}{2}\n", indent, indent, "length_of_binary, The length of data_type 'BINARY'. Only applicable when type of cols is 'BINARY'. 
Default is 8"); + Console.Write("{0}{1}", indent, "-l"); + Console.Write("{0}{1}{2}\n", indent, indent, "num_of_cols_per_record, The number of columns per record. Default is 3."); + Console.Write("{0}{1}", indent, "-T"); + Console.Write("{0}{1}{2}\n", indent, indent, "num_of_threads, The number of threads. Default is 10."); + Console.Write("{0}{1}", indent, "-r"); + Console.Write("{0}{1}{2}\n", indent, indent, "num_of_records_per_req, The number of records per request. Default is 1000."); + Console.Write("{0}{1}", indent, "-t"); + Console.Write("{0}{1}{2}\n", indent, indent, "num_of_tables, The number of tables. Default is 1."); + Console.Write("{0}{1}", indent, "-n"); + Console.Write("{0}{1}{2}\n", indent, indent, "num_of_records_per_table, The number of records per table. Default is 1."); + Console.Write("{0}{1}", indent, "-c"); + Console.Write("{0}{1}{2}\n", indent, indent, "config_directory, Configuration directory. Default is '/etc/taos/'."); + Console.Write("{0}{1}", indent, "-x"); + Console.Write("{0}{1}{2}\n", indent, indent, "flag, Insert only flag."); + Console.Write("{0}{1}", indent, "-O"); + Console.Write("{0}{1}{2}\n", indent, indent, "order, Insert mode--0: In order, 1: Out of order. Default is in order."); + Console.Write("{0}{1}", indent, "-R"); + Console.Write("{0}{1}{2}\n", indent, indent, "rate, Out of order data's rate--if order=1 Default 10, min: 0, max: 50."); + Console.Write("{0}{1}", indent, "-D"); + Console.Write("{0}{1}{2}\n", indent, indent, "Delete data methods 0: don't delete, 1: delete by table, 2: delete by stable, 3: delete by database."); + Console.Write("{0}{1}", indent, "-v"); + Console.Write("{0}{1}{2}\n", indent, indent, "Print verbose output"); + Console.Write("{0}{1}", indent, "-y"); + Console.Write("{0}{1}{2}\n", indent, indent, "Skip read key for continous test, default is not skip"); + + System.Environment.Exit(0); + } + } + } + + public void ReadArgument(String[] argv) + { + host = this.GetArgumentAsString(argv, "-h", "127.0.0.1"); + port = (short)this.GetArgumentAsLong(argv, "-p", 0, 65535, 6030); + user = this.GetArgumentAsString(argv, "-u", "root"); + password = this.GetArgumentAsString(argv, "-P", "taosdata"); + dbName = this.GetArgumentAsString(argv, "-d", "db"); + stableName = this.GetArgumentAsString(argv, "-s", "st"); + tablePrefix = this.GetArgumentAsString(argv, "-m", "t"); + isInsertOnly = this.GetArgumentAsFlag(argv, "-x"); + queryMode = (int)this.GetArgumentAsLong(argv, "-q", 0, 1, 0); + numOfTables = this.GetArgumentAsLong(argv, "-t", 1, 1000000000, 1); + batchRows = this.GetArgumentAsLong(argv, "-r", 1, 10000, 1000); + recordsPerTable = this.GetArgumentAsLong(argv, "-n", 1, 100000000000, 1); + recordsPerRequest = (int)this.GetArgumentAsLong(argv, "-r", 1, 10000, 1); + colsPerRecord = (int)this.GetArgumentAsLong(argv, "-l", 1, 1024, 3); + configDir = this.GetArgumentAsString(argv, "-c", "C:/TDengine/cfg"); + useStable = this.GetArgumentAsFlag(argv, "-M"); + + replica = (short)this.GetArgumentAsLong(argv, "-a", 1, 5, 1); + methodOfDelete = (short)this.GetArgumentAsLong(argv, "-D", 0, 3, 0); + numOfThreads = (short)this.GetArgumentAsLong(argv, "-T", 1, 10000, 1); + order = this.GetArgumentAsFlag(argv, "-O"); + rateOfOutorder = this.GetArgumentAsLong(argv, "-R", 0, 100, 0); + + skipReadKey = this.GetArgumentAsFlag(argv, "-y"); + verbose = this.GetArgumentAsFlag(argv, "-v"); + + Console.Write("###################################################################\n"); + Console.Write("# Server IP: {0}\n", host); + Console.Write("# User: 
{0}\n", user); + Console.Write("# Password: {0}\n", password); + Console.Write("# Number of Columns per record: {0}\n", colsPerRecord); + Console.Write("# Number of Threads: {0}\n", numOfThreads); + Console.Write("# Number of Tables: {0}\n", numOfTables); + Console.Write("# Number of Data per Table: {0}\n", recordsPerTable); + Console.Write("# Records/Request: {0}\n", recordsPerRequest); + Console.Write("# Database name: {0}\n", dbName); + Console.Write("# Replica: {0}\n", replica); + Console.Write("# Use STable: {0}\n", useStable); + Console.Write("# Table prefix: {0}\n", tablePrefix); + Console.Write("# Data order: {0}\n", order); + Console.Write("# Data out of order rate: {0}\n", rateOfOutorder); + Console.Write("# Delete method: {0}\n", methodOfDelete); + Console.Write("# Query Mode: {0}\n", queryMode); + Console.Write("# Insert Only: {0}\n", isInsertOnly); + Console.Write("# Verbose output {0}\n", verbose); + Console.Write("# Test time: {0}\n", DateTime.Now.ToString("h:mm:ss tt")); + + Console.Write("###################################################################\n"); + + if (skipReadKey == false) + { + Console.Write("Press any key to continue..\n"); + Console.ReadKey(); + } + } + + public bool GetArgumentAsFlag(String[] argv, String argName) + { + int argc = argv.Length; + for (int i = 0; i < argc; ++i) + { + if (argName == argv[i]) + { + return true; + } + } + return false; + } + + public long GetArgumentAsLong(String[] argv, String argName, int minVal, long maxVal, int defaultValue) + { + int argc = argv.Length; + for (int i = 0; i < argc; ++i) + { + if (argName != argv[i]) + { + continue; + } + if (i < argc - 1) + { + String tmp = argv[i + 1]; + if (tmp[0] == '-') + { + Console.WriteLine("option {0:G} requires an argument", tmp); + ExitProgram(); + } + + long tmpVal = Convert.ToInt64(tmp); + if (tmpVal < minVal || tmpVal > maxVal) + { + Console.WriteLine("option {0:G} should in range [{1:G}, {2:G}]", argName, minVal, maxVal); + ExitProgram(); + } + + return tmpVal; + } + } + + return defaultValue; + } + + public String GetArgumentAsString(String[] argv, String argName, String defaultValue) + { + int argc = argv.Length; + for (int i = 0; i < argc; ++i) + { + if (argName != argv[i]) + { + continue; + } + if (i < argc - 1) + { + String tmp = argv[i + 1]; + if (tmp[0] == '-') + { + Console.WriteLine("option {0:G} requires an argument", tmp); + ExitProgram(); + } + return tmp; + } + } + + return defaultValue; + } + + static void ExitProgram() + { + TDengine.Cleanup(); + System.Environment.Exit(0); + } + + private void DebugPrintFormat(string format, params object[] parameters) + { + if (verbose == true) + { + Console.Write(format, parameters); + } + } + + private void DebugPrint(string str) + { + if (verbose == true) + { + Console.Write(str); + } + } + + public void InitTDengine() + { + TDengine.Options((int)TDengineInitOption.TDDB_OPTION_CONFIGDIR, this.configDir); + TDengine.Options((int)TDengineInitOption.TDDB_OPTION_SHELL_ACTIVITY_TIMER, "60"); + TDengine.Init(); + DebugPrint("TDengine Initialization finished\n"); + } + + public void ConnectTDengine() + { + string db = ""; + DebugPrintFormat("host:{0} user:{1}, pass:{2}; db:{3}, port:{4}\n", + this.host, this.user, this.password, db, this.port); + this.conn = TDengine.Connect(this.host, this.user, this.password, db, this.port); + if (this.conn == IntPtr.Zero) + { + Console.WriteLine("Connect to TDengine failed"); + ExitProgram(); + } + else + { + DebugPrint("Connect to TDengine success\n"); + } + } + + public void 
CreateTablesByThreads() + { + Thread[] threadArr = new Thread[numOfThreads]; + + long quotition = numOfTables / numOfThreads; + if (quotition < 1) + { + numOfThreads = numOfTables; + quotition = 1; + } + + long remainder = 0; + if (numOfThreads != 0) + { + remainder = numOfTables % numOfThreads; + } + + long last = 0; + + for (int i = 0; i < numOfThreads; i++) + { + CreateTableThread createTableThread = new CreateTableThread(); + createTableThread.id = i; + createTableThread.verbose = verbose; + createTableThread.dbName = this.dbName; + createTableThread.tablePrefix = this.tablePrefix; + createTableThread.useStable = useStable; + if (useStable) + { + createTableThread.stableName = stableName; + } + createTableThread.conn = conn; + + createTableThread.start = last; + if (i < remainder) + { + createTableThread.end = last + quotition; + } + else + { + createTableThread.end = last + quotition - 1; + } + last = createTableThread.end + 1; + + threadArr[i] = new Thread(createTableThread.ThreadMain); + threadArr[i].Start(); + threadArr[i].Join(); + } + } + + public void dropDatabase() + { + StringBuilder sql = new StringBuilder(); + sql.Append("DROP DATABASE IF EXISTS ").Append(this.dbName); + IntPtr res = TDengine.Query(this.conn, sql.ToString()); + if (res != IntPtr.Zero) + { + DebugPrint(sql.ToString() + " success\n"); + } + else + { + Console.WriteLine(sql.ToString() + " failure, reason: " + TDengine.Error(res)); + ExitProgram(); + } + + } + + public void CreateDb() + { + StringBuilder sql = new StringBuilder(); + sql.Append("CREATE DATABASE IF NOT EXISTS ").Append(this.dbName).Append(" replica ").Append(this.replica); + IntPtr res = TDengine.Query(this.conn, sql.ToString()); + if (res != IntPtr.Zero) + { + DebugPrint(sql.ToString() + " success\n"); + } + else + { + Console.WriteLine(sql.ToString() + " failure, reason: " + TDengine.Error(res)); + ExitProgram(); + } + TDengine.FreeResult(res); + } + + public void CreateStable() + { + StringBuilder sql = new StringBuilder(); + + sql.Clear(); + sql.Append("CREATE TABLE IF NOT EXISTS "). + Append(this.dbName).Append(".").Append(this.stableName). 
+ Append("(ts timestamp, v1 bool, v2 tinyint, v3 smallint, v4 int, v5 bigint, v6 float, v7 double, v8 binary(10), v9 nchar(10)) tags(t1 int)"); + IntPtr res = TDengine.Query(this.conn, sql.ToString()); + if (res != IntPtr.Zero) + { + DebugPrint(sql.ToString() + " success\n"); + } + else + { + Console.WriteLine(sql.ToString() + " failure, reason: " + TDengine.Error(res)); + ExitProgram(); + } + TDengine.FreeResult(res); + } + + public void InsertByThreads() + { + Thread[] threadArr = new Thread[numOfThreads]; + + long quotition = numOfTables / numOfThreads; + if (quotition < 1) + { + numOfThreads = numOfTables; + quotition = 1; + } + + long remainder = 0; + if (numOfThreads != 0) + { + remainder = numOfTables % numOfThreads; + } + + long last = 0; + + for (int i = 0; i < numOfThreads; i++) + { + InsertDataThread insertThread = new InsertDataThread(); + insertThread.id = i; + insertThread.recordsPerTable = recordsPerTable; + insertThread.batchRows = batchRows; + insertThread.numOfTables = numOfTables; + insertThread.verbose = verbose; + insertThread.dbName = this.dbName; + insertThread.tablePrefix = this.tablePrefix; + if (useStable) + { + // insertThread.stableName = stableName; + } + insertThread.conn = conn; + + insertThread.start = last; + if (i < remainder) + { + insertThread.end = last + quotition; + } + else + { + insertThread.end = last + quotition - 1; + } + last = insertThread.end + 1; + + threadArr[i] = new Thread(insertThread.ThreadMain); + threadArr[i].Start(); + threadArr[i].Join(); + } + } + + public void ExecuteQuery() + { + // System.DateTime start = new System.DateTime(); + long queryRows = 0; + + for (int i = 0; i < 1/*this.numOfTables*/; ++i) + { + String sql = "select * from " + this.dbName + "." + tablePrefix + i; + // Console.WriteLine(sql); + + IntPtr res = TDengine.Query(conn, sql); + if (res == IntPtr.Zero) + { + Console.WriteLine(sql + " failure, reason: " + TDengine.Error(res)); + ExitProgram(); + } + + int fieldCount = TDengine.FieldCount(res); + // Console.WriteLine("field count: " + fieldCount); + + List metas = TDengine.FetchFields(res); + for (int j = 0; j < metas.Count; j++) + { + TDengineMeta meta = (TDengineMeta)metas[j]; + // Console.WriteLine("index:" + j + ", type:" + meta.type + ", typename:" + meta.TypeName() + ", name:" + meta.name + ", size:" + meta.size); + } + + IntPtr rowdata; + StringBuilder builder = new StringBuilder(); + while ((rowdata = TDengine.FetchRows(res)) != IntPtr.Zero) + { + queryRows++; + for (int fields = 0; fields < fieldCount; ++fields) + { + TDengineMeta meta = metas[fields]; + int offset = IntPtr.Size * fields; + IntPtr data = Marshal.ReadIntPtr(rowdata, offset); + + builder.Append("---"); + + if (data == IntPtr.Zero) + { + builder.Append("NULL"); + continue; + } + + switch ((TDengineDataType)meta.type) + { + case TDengineDataType.TSDB_DATA_TYPE_BOOL: + bool v1 = Marshal.ReadByte(data) == 0 ? 
false : true; + builder.Append(v1); + break; + case TDengineDataType.TSDB_DATA_TYPE_TINYINT: + byte v2 = Marshal.ReadByte(data); + builder.Append(v2); + break; + case TDengineDataType.TSDB_DATA_TYPE_SMALLINT: + short v3 = Marshal.ReadInt16(data); + builder.Append(v3); + break; + case TDengineDataType.TSDB_DATA_TYPE_INT: + int v4 = Marshal.ReadInt32(data); + builder.Append(v4); + break; + case TDengineDataType.TSDB_DATA_TYPE_BIGINT: + long v5 = Marshal.ReadInt64(data); + builder.Append(v5); + break; + case TDengineDataType.TSDB_DATA_TYPE_FLOAT: + float v6 = (float)Marshal.PtrToStructure(data, typeof(float)); + builder.Append(v6); + break; + case TDengineDataType.TSDB_DATA_TYPE_DOUBLE: + double v7 = (double)Marshal.PtrToStructure(data, typeof(double)); + builder.Append(v7); + break; + case TDengineDataType.TSDB_DATA_TYPE_BINARY: + string v8 = Marshal.PtrToStringAnsi(data); + builder.Append(v8); + break; + case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP: + long v9 = Marshal.ReadInt64(data); + builder.Append(v9); + break; + case TDengineDataType.TSDB_DATA_TYPE_NCHAR: + string v10 = Marshal.PtrToStringAnsi(data); + builder.Append(v10); + break; + } + } + builder.Append("---"); + + if (queryRows <= 10) + { + Console.WriteLine(builder.ToString()); + } + builder.Clear(); + } + + if (TDengine.ErrorNo(res) != 0) + { + Console.Write("Query is not complete, Error {0:G}", + TDengine.ErrorNo(res), TDengine.Error(res)); + } + + TDengine.FreeResult(res); + } + /* + System.DateTime end = new System.DateTime(); + TimeSpan ts = end - start; + + Console.Write("Total {0:G} rows inserted, {1:G} rows query, time spend {2:G} seconds.\n" + , this.rowsInserted, queryRows, ts.TotalSeconds); + */ + } + + public void CloseConnection() + { + if (this.conn != IntPtr.Zero) + { + TDengine.Close(this.conn); + } + } + + // Main entry + static void Main(string[] args) + { + PrintHelp(args); + + TDengineTest tester = new TDengineTest(); + tester.ReadArgument(args); + + tester.InitTDengine(); + tester.ConnectTDengine(); + + if (tester.isInsertOnly == false) + { + tester.dropDatabase(); + tester.CreateDb(); + + + if (tester.useStable == true) + { + tester.CreateStable(); + } + + tester.CreateTablesByThreads(); + } + + Stopwatch watch = Stopwatch.StartNew(); + tester.InsertByThreads(); + watch.Stop(); + double elapsedMs = watch.Elapsed.TotalMilliseconds; + + Console.WriteLine("Spent {0} seconds to insert {1} records with {2} record(s) per request: {3} records/second", + elapsedMs / 1000, + tester.recordsPerTable * tester.numOfTables, + tester.batchRows, + (tester.recordsPerTable * tester.numOfTables * 1000) / elapsedMs); + + tester.ExecuteQuery(); + tester.CloseConnection(); + + Console.WriteLine("End."); + } + + public class InsertDataThread + { + public long id { set; get; } + public long start { set; get; } + public long end { set; get; } + public string dbName { set; get; } + public IntPtr conn { set; get; } + public string tablePrefix { set; get; } + // public string stableName { set; get; } + public long recordsPerTable { set; get; } + public long batchRows { set; get; } + public long numOfTables { set; get; } + public bool verbose { set; get; } + + private void DebugPrintFormat(string format, params object[] parameters) + { + if (verbose == true) + { + Console.Write(format, parameters); + } + } + + private void DebugPrint(string str) + { + if (verbose == true) + { + Console.Write(str); + } + } + + public void ThreadMain() + { + DebugPrintFormat("InsertDataThread {0} from {1} to {2}\n", id, start, end); + StringBuilder sql 
= new StringBuilder(); + + DateTime now = DateTime.Now; + int h = now.Hour; + int m = now.Minute; + int s = now.Second; + + long baseTimestamp = 1609430400000; // 2021/01/01 0:0:0 + DebugPrintFormat("beginTime is {0} + {1}h:{2}m:{3}s\n", baseTimestamp, h, m, s); + long beginTimestamp = baseTimestamp + ((h*60 + m) * 60 + s) * 1000; + + long rowsInserted = 0; + + // System.DateTime startTime = new System.DateTime(); + long i = 0; + while (i < recordsPerTable) + { + for (long table = start; table <= end; ++table) + { + long inserted = i; + + sql.Clear(); + sql.Append("INSERT INTO "). + Append(this.dbName).Append(".").Append(this.tablePrefix).Append(table). + Append(" VALUES"); + if (recordsPerTable < batchRows) + { + batchRows = recordsPerTable; + } + for (int batch = 0; batch < batchRows; ++batch) + { + sql.Append("(") + .Append(beginTimestamp + i + batch) + .Append(", 1, 2, 3,") + .Append(i + batch) + .Append(", 5, 6, 7, 'abc', 'def')"); + + } + IntPtr res = TDengine.Query(this.conn, sql.ToString()); + if (res == IntPtr.Zero) + { + DebugPrint(sql.ToString() + " failure, reason: " + TDengine.Error(res) + "\n"); + } + + inserted += this.batchRows; + + int affectRows = TDengine.AffectRows(res); + rowsInserted += affectRows; + + TDengine.FreeResult(res); + if (table == end) + { + i = inserted; + } + } + } + + } + } + + public class CreateTableThread + { + public long id { set; get; } + public long start { set; get; } + public long end { set; get; } + public string dbName { set; get; } + public IntPtr conn { set; get; } + public string tablePrefix { set; get; } + public string stableName { set; get; } + public bool verbose { set; get; } + public bool useStable { set; get; } + + private void DebugPrintFormat(string format, params object[] parameters) + { + if (verbose == true) + { + Console.Write(format, parameters); + } + } + + private void DebugPrint(string str) + { + if (verbose == true) + { + Console.Write(str); + } + } + + public void ThreadMain() + { + DebugPrintFormat("CreateTable {0} from {1} to {2}\n", id, start, end); + + StringBuilder sql = new StringBuilder(); + + for (long tableId = start; tableId <= end; tableId++) + { + sql.Clear(); + sql = sql.Append("CREATE TABLE IF NOT EXISTS "). + Append(this.dbName).Append(".").Append(this.tablePrefix).Append(tableId); + if (useStable == true) + { + sql = sql.Append(" USING ").Append(this.dbName).Append(".").Append(this.stableName). 
+ Append(" TAGS(").Append(tableId).Append(")"); + } + else + { + sql = sql.Append("(ts timestamp, v1 bool, v2 tinyint, v3 smallint, v4 int, v5 bigint, v6 float, v7 double, v8 binary(10), v9 nchar(10))"); + } + IntPtr res = TDengine.Query(this.conn, sql.ToString()); + if (res != IntPtr.Zero) + { + DebugPrint(sql.ToString() + " success\n"); + } + else + { + DebugPrint(sql.ToString() + " failure, reason: " + TDengine.Error(res) + "\n"); + ExitProgram(); + } + TDengine.FreeResult(res); + } + + } + } + } +} diff --git a/tests/examples/JDBC/JDBCDemo/pom.xml b/tests/examples/JDBC/JDBCDemo/pom.xml index 98f908b77e8d3bfa03eebf8560205b0424720f1f..d075fc8f2ad480535075b79efc15c55d9bb799a5 100644 --- a/tests/examples/JDBC/JDBCDemo/pom.xml +++ b/tests/examples/JDBC/JDBCDemo/pom.xml @@ -5,17 +5,12 @@ 4.0.0 com.taosdata.jdbc - jdbcChecker + JDBCDemo SNAPSHOT jar + - - org.apache.maven.plugins - maven-assembly-plugin - 3.0.0 - - org.apache.maven.plugins maven-assembly-plugin @@ -23,7 +18,7 @@ - com.taosdata.example.JdbcChecker + com.taosdata.example.JDBCDemo @@ -49,6 +44,7 @@ 8 + @@ -56,18 +52,8 @@ com.taosdata.jdbc taos-jdbcdriver - 2.0.12 - - - log4j - log4j - 1.2.17 - - - junit - junit - 4.13.1 - test + 2.0.15 + diff --git a/tests/examples/JDBC/JDBCDemo/readme.md b/tests/examples/JDBC/JDBCDemo/readme.md index e348e458fe938c4f2381c448f3c15e60af27040e..da638a0bcc485cb3d73f75b59348ec260cc871d2 100644 --- a/tests/examples/JDBC/JDBCDemo/readme.md +++ b/tests/examples/JDBC/JDBCDemo/readme.md @@ -1,29 +1,37 @@ -# How to Run the JDBC Demo Code On A Linux OS +# How to Run the JDBC Demo Code On Linux OS TDengine's JDBC demo project is organized in a Maven way so that users can easily compile, package and run the project. If you don't have Maven on your server, you may install it using -
<pre><code>sudo apt-get install maven</code></pre>
+``` +sudo apt-get install maven +``` ## Install TDengine Client Make sure you have already installed a tdengine client on your current develop environment. Download the tdengine package on our website: ``https://www.taosdata.com/cn/all-downloads/`` and install the client. -## How to run jdbcChecker -
<pre><code>mvn clean compile exec:java -Dexec.mainClass="com.taosdata.example.JdbcChecker" -Dexec.args="-host localhost"</code></pre>
- -## How to run jdbcTaosDemo +## Run jdbcDemo using mvn plugin run command: -
<pre><code> mvn clean compile exec:java -Dexec.mainClass="com.taosdata.example.jdbcTaosdemo.JdbcTaosdemo"</code></pre>
+``` +mvn clean compile exec:java -Dexec.mainClass="com.taosdata.example.JdbcDemo" +``` + and run with your custom args -
<pre><code>mvn clean compile exec:java -Dexec.mainClass="com.taosdata.example.jdbcTaosdemo.JdbcTaosdemo" -Dexec.args="-host localhost"</code></pre>
+``` +mvn clean compile exec:java -Dexec.mainClass="com.taosdata.example.JdbcDemo" -Dexec.args="-host [HOSTNAME]" +``` ## Compile the Demo Code and Run It +To compile taos-jdbcdriver, go to the source directory ``TDengine/src/connector/jdbc`` and execute +``` +mvn clean package -Dmaven.test.skip=true +``` To compile the demo project, go to the source directory ``TDengine/tests/examples/JDBC/JDBCDemo`` and execute - -
+```
 mvn clean package assembly:single
-</code></pre>
+``` -The ``pom.xml`` is configured to package all the dependencies into one executable jar file. +To run JDBCDemo.jar, go to ``TDengine/tests/examples/JDBC/JDBCDemo`` and execute +``` +java -Djava.ext.dirs=../../../../src/connector/jdbc/target:$JAVA_HOME/jre/lib/ext -jar target/JDBCDemo-SNAPSHOT-jar-with-dependencies.jar -host [HOSTNAME] +``` -To run it, go to ``examples/JDBC/JDBCDemo/target`` and execute -
<pre><code>java -jar jdbcChecker-SNAPSHOT-jar-with-dependencies.jar -host localhost</code></pre>
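The readme changes above only show how to package and launch the demo jar. For readers who want to see the connection pattern the demo relies on, here is a minimal plain-JDBC sketch. It reuses the driver class, URL format, and SQL statements that appear in JDBCDemo.java later in this patch; the host name `localhost` and the class name `JdbcQuickStart` are illustrative placeholders, not part of the project.

```
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

// Minimal sketch of the connection flow used by JDBCDemo, assuming a TDengine
// server on localhost with default ports and the taos-jdbcdriver jar on the
// classpath. Class and host names are placeholders.
public class JdbcQuickStart {
    public static void main(String[] args) throws Exception {
        // Load the JNI driver; the demo's "-driverType restful" path uses
        // com.taosdata.jdbc.rs.RestfulDriver with a jdbc:TAOS-RS://<host>:6041/ URL instead.
        Class.forName("com.taosdata.jdbc.TSDBDriver");
        String url = "jdbc:TAOS://localhost:6030/?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url);
             Statement stmt = conn.createStatement()) {
            // Same statements JDBCDemo issues: create db/table, insert one row, read it back.
            stmt.execute("create database if not exists test");
            stmt.execute("create table if not exists test.weather (ts timestamp, temperature float, humidity int)");
            stmt.execute("insert into test.weather (ts, temperature, humidity) values(now, 20.5, 34)");
            try (ResultSet rs = stmt.executeQuery("select * from test.weather")) {
                while (rs.next()) {
                    System.out.println(rs.getTimestamp(1) + "\t" + rs.getFloat(2) + "\t" + rs.getInt(3));
                }
            }
        }
    }
}
```

The restful variant referenced by JDBCDemo's `-driverType` option only changes the driver class and URL shown in the comment above, so it can run without the native client library installed on the machine executing the demo.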
diff --git a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcChecker.java b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JDBCDemo.java similarity index 59% rename from tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcChecker.java rename to tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JDBCDemo.java index 4be71c52214c348ed7b41c3e763de0d908514907..e569de10cf7894aa04fc3cb5bdb8354b581d5a93 100644 --- a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcChecker.java +++ b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JDBCDemo.java @@ -1,56 +1,77 @@ package com.taosdata.example; -import com.taosdata.jdbc.TSDBDriver; - import java.sql.*; import java.util.Properties; -public class JdbcChecker { +public class JDBCDemo { private static String host; - private static String dbName = "test"; - private static String tbName = "weather"; + private static String driverType = "jni"; + private static final String dbName = "test"; + private static final String tbName = "weather"; private Connection connection; - /** - * get connection - **/ + public static void main(String[] args) { + for (int i = 0; i < args.length; i++) { + if ("-host".equalsIgnoreCase(args[i]) && i < args.length - 1) + host = args[++i]; + if ("-driverType".equalsIgnoreCase(args[i]) && i < args.length - 1) { + driverType = args[++i]; + if (!"jni".equalsIgnoreCase(driverType) && !"restful".equalsIgnoreCase(driverType)) + printHelp(); + } + } + + if (host == null) { + printHelp(); + } + + JDBCDemo demo = new JDBCDemo(); + demo.init(); + demo.createDatabase(); + demo.useDatabase(); + demo.dropTable(); + demo.createTable(); + demo.insert(); + demo.select(); + demo.dropTable(); + demo.close(); + } + private void init() { + // get connection try { - Class.forName("com.taosdata.jdbc.TSDBDriver"); + String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"; + if (driverType.equals("restful")) { + Class.forName("com.taosdata.jdbc.rs.RestfulDriver"); + url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata"; + } else { + Class.forName("com.taosdata.jdbc.TSDBDriver"); + } Properties properties = new Properties(); - properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host); - properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); - properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); - properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + properties.setProperty("host", host); + properties.setProperty("charset", "UTF-8"); + properties.setProperty("locale", "en_US.UTF-8"); + properties.setProperty("timezone", "UTC-8"); System.out.println("get connection starting..."); - connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties); + connection = DriverManager.getConnection(url, properties); if (connection != null) System.out.println("[ OK ] Connection established."); } catch (ClassNotFoundException | SQLException e) { - throw new RuntimeException("connection failed: " + host); + e.printStackTrace(); } } - /** - * create database - */ private void createDatabase() { String sql = "create database if not exists " + dbName; exuete(sql); } - /** - * use database - */ private void useDatabase() { String sql = "use " + dbName; exuete(sql); } - /** - * select - */ - private void checkSelect() { + private void select() { final String sql = "select * from test.weather"; executeQuery(sql); } @@ -79,40 +100,21 @@ public class JdbcChecker { } } - private String 
formatString(String str) { - StringBuilder sb = new StringBuilder(); - int blankCnt = (26 - str.length()) / 2; - for (int j = 0; j < blankCnt; j++) - sb.append(" "); - sb.append(str); - for (int j = 0; j < blankCnt; j++) - sb.append(" "); - sb.append("|"); - return sb.toString(); - } - - - /** - * insert - */ - private void checkInsert() { + private void insert() { final String sql = "insert into test.weather (ts, temperature, humidity) values(now, 20.5, 34)"; exuete(sql); } - /** - * create table - */ private void createTable() { final String sql = "create table if not exists " + dbName + "." + tbName + " (ts timestamp, temperature float, humidity int)"; exuete(sql); } - private final void printSql(String sql, boolean succeed, long cost) { + private void printSql(String sql, boolean succeed, long cost) { System.out.println("[ " + (succeed ? "OK" : "ERROR!") + " ] time cost: " + cost + " ms, execute statement ====> " + sql); } - private final void exuete(String sql) { + private void exuete(String sql) { try (Statement statement = connection.createStatement()) { long start = System.currentTimeMillis(); boolean execute = statement.execute(sql); @@ -120,7 +122,7 @@ public class JdbcChecker { printSql(sql, execute, (end - start)); } catch (SQLException e) { e.printStackTrace(); - + } } @@ -135,39 +137,15 @@ public class JdbcChecker { } } - private void checkDropTable() { + private void dropTable() { final String sql = "drop table if exists " + dbName + "." + tbName + ""; exuete(sql); } - public static void main(String[] args) { - for (int i = 0; i < args.length; i++) { - if ("-host".equalsIgnoreCase(args[i]) && i < args.length - 1) { - host = args[++i]; - } - if ("-db".equalsIgnoreCase(args[i]) && i < args.length - 1) { - dbName = args[++i]; - } - if ("-t".equalsIgnoreCase(args[i]) && i < args.length - 1) { - tbName = args[++i]; - } - } - - if (host == null) { - System.out.println("Usage: java -jar JDBCConnectorChecker.jar -host "); - return; - } - - JdbcChecker checker = new JdbcChecker(); - checker.init(); - checker.createDatabase(); - checker.useDatabase(); - checker.checkDropTable(); - checker.createTable(); - checker.checkInsert(); - checker.checkSelect(); - checker.checkDropTable(); - checker.close(); + private static void printHelp() { + System.out.println("Usage: java -jar JdbcDemo.jar -host -driverType "); + System.exit(0); } + } diff --git a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/JdbcTaosdemo.java b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/JdbcTaosdemo.java deleted file mode 100644 index 259985ec9f4708b9317575fd97919adcc82d7161..0000000000000000000000000000000000000000 --- a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/JdbcTaosdemo.java +++ /dev/null @@ -1,357 +0,0 @@ -package com.taosdata.example.jdbcTaosdemo; - -import com.taosdata.example.jdbcTaosdemo.domain.JdbcTaosdemoConfig; -import com.taosdata.example.jdbcTaosdemo.task.CreateTableTask; -import com.taosdata.example.jdbcTaosdemo.task.InsertTableDatetimeTask; -import com.taosdata.example.jdbcTaosdemo.task.InsertTableTask; -import com.taosdata.example.jdbcTaosdemo.utils.ConnectionFactory; -import com.taosdata.example.jdbcTaosdemo.utils.SqlSpeller; -import com.taosdata.example.jdbcTaosdemo.utils.TimeStampUtil; -import org.apache.log4j.Logger; - -import java.sql.*; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -public class JdbcTaosdemo { - - private static Logger logger = 
Logger.getLogger(JdbcTaosdemo.class); - private final JdbcTaosdemoConfig config; - private Connection connection; - - public JdbcTaosdemo(JdbcTaosdemoConfig config) { - this.config = config; - } - - public static void main(String[] args) { - // parse config from args - JdbcTaosdemoConfig config = new JdbcTaosdemoConfig(args); - - boolean isHelp = Arrays.asList(args).contains("--help"); - if (isHelp) { - JdbcTaosdemoConfig.printHelp(); - return; - } - if (config.getHost() == null) { - JdbcTaosdemoConfig.printHelp(); - return; - } - - JdbcTaosdemo taosdemo = new JdbcTaosdemo(config); - // establish connection - taosdemo.init(); - // drop database - taosdemo.dropDatabase(); - // create database - taosdemo.createDatabase(); - // use db - taosdemo.useDatabase(); - // create super table - taosdemo.createSuperTable(); - // create sub tables - taosdemo.createTableMultiThreads(); - - boolean infinite = Arrays.asList(args).contains("--infinite"); - if (infinite) { - logger.info("!!! Infinite Insert Mode Started. !!!"); - taosdemo.insertInfinite(); - } else { - // insert into table - taosdemo.insertMultiThreads(); - // select from sub table - taosdemo.selectFromTableLimit(); - taosdemo.selectCountFromTable(); - taosdemo.selectAvgMinMaxFromTable(); - // select last from - taosdemo.selectLastFromTable(); - // select from super table - taosdemo.selectFromSuperTableLimit(); - taosdemo.selectCountFromSuperTable(); - taosdemo.selectAvgMinMaxFromSuperTable(); - //select avg ,max from stb where tag - taosdemo.selectAvgMinMaxFromSuperTableWhereTag(); - //select last from stb where location = '' - taosdemo.selectLastFromSuperTableWhere(); - // select group by - taosdemo.selectGroupBy(); - // select like - taosdemo.selectLike(); - // select where ts >= ts<= - taosdemo.selectLastOneHour(); - taosdemo.selectLastOneDay(); - taosdemo.selectLastOneWeek(); - taosdemo.selectLastOneMonth(); - taosdemo.selectLastOneYear(); - - // drop super table - if (config.isDeleteTable()) - taosdemo.dropSuperTable(); - taosdemo.close(); - } - } - - - /** - * establish the connection - */ - private void init() { - try { - Class.forName("com.taosdata.jdbc.TSDBDriver"); - connection = ConnectionFactory.build(config); - if (connection != null) - logger.info("[ OK ] Connection established."); - } catch (ClassNotFoundException | SQLException e) { - logger.error(e.getMessage()); - throw new RuntimeException("connection failed: " + config.getHost()); - } - } - - /** - * create database - */ - private void createDatabase() { - String sql = SqlSpeller.createDatabaseSQL(config.getDbName(), config.getKeep(), config.getDays()); - execute(sql); - } - - /** - * drop database - */ - private void dropDatabase() { - String sql = SqlSpeller.dropDatabaseSQL(config.getDbName()); - execute(sql); - } - - /** - * use database - */ - private void useDatabase() { - String sql = SqlSpeller.useDatabaseSQL(config.getDbName()); - execute(sql); - } - - /** - * create super table - */ - private void createSuperTable() { - String sql = SqlSpeller.createSuperTableSQL(config.getStbName()); - execute(sql); - } - - /** - * create table use super table with multi threads - */ - private void createTableMultiThreads() { - try { - final int tableSize = config.getNumberOfTable() / config.getNumberOfThreads(); - List threads = new ArrayList<>(); - for (int i = 0; i < config.getNumberOfThreads(); i++) { - Thread thread = new Thread(new CreateTableTask(config, i * tableSize, tableSize), "Thread-" + i); - threads.add(thread); - thread.start(); - } - for (Thread thread : 
threads) { - thread.join(); - } - logger.info("<<< Multi Threads create table finished."); - } catch (InterruptedException e) { - logger.error(e.getMessage()); - e.printStackTrace(); - } - } - - /** - * insert data infinitely - */ - private void insertInfinite() { - try { - final long startDatetime = TimeStampUtil.datetimeToLong("2005-01-01 00:00:00.000"); - final long finishDatetime = TimeStampUtil.datetimeToLong("2030-01-01 00:00:00.000"); - - final int tableSize = config.getNumberOfTable() / config.getNumberOfThreads(); - List threads = new ArrayList<>(); - for (int i = 0; i < config.getNumberOfThreads(); i++) { - Thread thread = new Thread(new InsertTableDatetimeTask(config, i * tableSize, tableSize, startDatetime, finishDatetime), "Thread-" + i); - threads.add(thread); - thread.start(); - } - for (Thread thread : threads) { - thread.join(); - } - logger.info("<<< Multi Threads insert table finished."); - } catch (InterruptedException e) { - logger.error(e.getMessage()); - e.printStackTrace(); - } - } - - private void insertMultiThreads() { - try { - final int tableSize = config.getNumberOfTable() / config.getNumberOfThreads(); - final int numberOfRecordsPerTable = config.getNumberOfRecordsPerTable(); - List threads = new ArrayList<>(); - for (int i = 0; i < config.getNumberOfThreads(); i++) { - Thread thread = new Thread(new InsertTableTask(config, i * tableSize, tableSize, numberOfRecordsPerTable), "Thread-" + i); - threads.add(thread); - thread.start(); - } - for (Thread thread : threads) { - thread.join(); - } - logger.info("<<< Multi Threads insert table finished."); - } catch (InterruptedException e) { - logger.error(e.getMessage()); - e.printStackTrace(); - } - } - - private void selectFromTableLimit() { - String sql = SqlSpeller.selectFromTableLimitSQL(config.getDbName(), config.getTbPrefix(), 1, 10, 0); - executeQuery(sql); - } - - private void selectCountFromTable() { - String sql = SqlSpeller.selectCountFromTableSQL(config.getDbName(), config.getTbPrefix(), 1); - executeQuery(sql); - } - - private void selectAvgMinMaxFromTable() { - String sql = SqlSpeller.selectAvgMinMaxFromTableSQL("current", config.getDbName(), config.getTbPrefix(), 1); - executeQuery(sql); - } - - private void selectLastFromTable() { - String sql = SqlSpeller.selectLastFromTableSQL(config.getDbName(), config.getTbPrefix(), 1); - executeQuery(sql); - } - - private void selectFromSuperTableLimit() { - String sql = SqlSpeller.selectFromSuperTableLimitSQL(config.getDbName(), config.getStbName(), 10, 0); - executeQuery(sql); - } - - private void selectCountFromSuperTable() { - String sql = SqlSpeller.selectCountFromSuperTableSQL(config.getDbName(), config.getStbName()); - executeQuery(sql); - } - - private void selectAvgMinMaxFromSuperTable() { - String sql = SqlSpeller.selectAvgMinMaxFromSuperTableSQL("current", config.getDbName(), config.getStbName()); - executeQuery(sql); - } - - private void selectAvgMinMaxFromSuperTableWhereTag() { - String sql = SqlSpeller.selectAvgMinMaxFromSuperTableWhere("current", config.getDbName(), config.getStbName()); - executeQuery(sql); - } - - private void selectLastFromSuperTableWhere() { - String sql = SqlSpeller.selectLastFromSuperTableWhere("current", config.getDbName(), config.getStbName()); - executeQuery(sql); - } - - private void selectGroupBy() { - String sql = SqlSpeller.selectGroupBy("current", config.getDbName(), config.getStbName()); - executeQuery(sql); - } - - private void selectLike() { - String sql = SqlSpeller.selectLike(config.getDbName(), 
config.getStbName()); - executeQuery(sql); - } - - private void selectLastOneHour() { - String sql = SqlSpeller.selectLastOneHour(config.getDbName(), config.getStbName()); - executeQuery(sql); - } - - private void selectLastOneDay() { - String sql = SqlSpeller.selectLastOneDay(config.getDbName(), config.getStbName()); - executeQuery(sql); - } - - private void selectLastOneWeek() { - String sql = SqlSpeller.selectLastOneWeek(config.getDbName(), config.getStbName()); - executeQuery(sql); - } - - private void selectLastOneMonth() { - String sql = SqlSpeller.selectLastOneMonth(config.getDbName(), config.getStbName()); - executeQuery(sql); - } - - private void selectLastOneYear() { - String sql = SqlSpeller.selectLastOneYear(config.getDbName(), config.getStbName()); - executeQuery(sql); - } - - - private void close() { - try { - if (connection != null) { - this.connection.close(); - logger.info("connection closed."); - } - } catch (SQLException e) { - logger.error(e.getMessage()); - e.printStackTrace(); - } - } - - /** - * drop super table - */ - private void dropSuperTable() { - String sql = SqlSpeller.dropSuperTableSQL(config.getDbName(), config.getStbName()); - execute(sql); - } - - /** - * execute sql, use this method when sql is create, alter, drop.. - */ - private void execute(String sql) { - try (Statement statement = connection.createStatement()) { - long start = System.currentTimeMillis(); - boolean execute = statement.execute(sql); - long end = System.currentTimeMillis(); - printSql(sql, execute, (end - start)); - } catch (SQLException e) { - logger.error("ERROR execute SQL ===> " + sql); - logger.error(e.getMessage()); - e.printStackTrace(); - } - } - - private static void printSql(String sql, boolean succeed, long cost) { - System.out.println("[ " + (succeed ? "OK" : "ERROR!") + " ] time cost: " + cost + " ms, execute statement ====> " + sql); - } - - private void executeQuery(String sql) { - try (Statement statement = connection.createStatement()) { - long start = System.currentTimeMillis(); - ResultSet resultSet = statement.executeQuery(sql); - long end = System.currentTimeMillis(); - printSql(sql, true, (end - start)); - printResult(resultSet); - } catch (SQLException e) { - logger.error("ERROR execute SQL ===> " + sql); - logger.error(e.getMessage()); - e.printStackTrace(); - } - } - - private static void printResult(ResultSet resultSet) throws SQLException { - ResultSetMetaData metaData = resultSet.getMetaData(); - while (resultSet.next()) { - StringBuilder sb = new StringBuilder(); - for (int i = 1; i <= metaData.getColumnCount(); i++) { - String columnLabel = metaData.getColumnLabel(i); - String value = resultSet.getString(i); - sb.append(columnLabel + ": " + value + "\t"); - } - System.out.println(sb.toString()); - } - } - -} diff --git a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/domain/JdbcTaosdemoConfig.java b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/domain/JdbcTaosdemoConfig.java deleted file mode 100644 index 36745a93941cc690f37d06d9a3662605723bbd2c..0000000000000000000000000000000000000000 --- a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/domain/JdbcTaosdemoConfig.java +++ /dev/null @@ -1,153 +0,0 @@ -package com.taosdata.example.jdbcTaosdemo.domain; - -public final class JdbcTaosdemoConfig { - - //The host to connect to TDengine. Must insert one - private String host; - //The TCP/IP port number to use for the connection. Default is 6030. 
- private int port = 6030; - //The TDengine user name to use when connecting to the server. Default is 'root' - private String user = "root"; - //The password to use when connecting to the server. Default is 'taosdata' - private String password = "taosdata"; - - //Destination database. Default is 'test' - private String dbName = "test"; - //keep - private int keep = 36500; - //days - private int days = 120; - - //Super table Name. Default is 'meters' - private String stbName = "meters"; - //Table name prefix. Default is 'd' - private String tbPrefix = "d"; - //The number of tables. Default is 10. - private int numberOfTable = 10; - //The number of records per table. Default is 2 - private int numberOfRecordsPerTable = 2; - //The number of records per request. Default is 100 - private int numberOfRecordsPerRequest = 100; - - //The number of threads. Default is 1. - private int numberOfThreads = 1; - //Delete data. Default is false - private boolean deleteTable = false; - - public static void printHelp() { - System.out.println("Usage: java -jar JdbcTaosDemo.jar [OPTION...]"); - System.out.println("-h host The host to connect to TDengine. you must input one"); - System.out.println("-p port The TCP/IP port number to use for the connection. Default is 6030"); - System.out.println("-u user The TDengine user name to use when connecting to the server. Default is 'root'"); - System.out.println("-P password The password to use when connecting to the server.Default is 'taosdata'"); - System.out.println("-d database Destination database. Default is 'test'"); - System.out.println("-m tablePrefix Table prefix name. Default is 'd'"); - System.out.println("-t num_of_tables The number of tables. Default is 10"); - System.out.println("-n num_of_records_per_table The number of records per table. Default is 2"); - System.out.println("-r num_of_records_per_req The number of records per request. Default is 100"); - System.out.println("-T num_of_threads The number of threads. Default is 1"); - System.out.println("-D delete table Delete data methods. 
Default is false"); - System.out.println("--help Give this help list"); -// System.out.println("--infinite infinite insert mode"); - } - - /** - * parse args from command line - * - * @param args command line args - * @return JdbcTaosdemoConfig - */ - public JdbcTaosdemoConfig(String[] args) { - for (int i = 0; i < args.length; i++) { - if ("-h".equals(args[i]) && i < args.length - 1) { - host = args[++i]; - } - if ("-p".equals(args[i]) && i < args.length - 1) { - port = Integer.parseInt(args[++i]); - } - if ("-u".equals(args[i]) && i < args.length - 1) { - user = args[++i]; - } - if ("-P".equals(args[i]) && i < args.length - 1) { - password = args[++i]; - } - if ("-d".equals(args[i]) && i < args.length - 1) { - dbName = args[++i]; - } - if ("-m".equals(args[i]) && i < args.length - 1) { - tbPrefix = args[++i]; - } - if ("-t".equals(args[i]) && i < args.length - 1) { - numberOfTable = Integer.parseInt(args[++i]); - } - if ("-n".equals(args[i]) && i < args.length - 1) { - numberOfRecordsPerTable = Integer.parseInt(args[++i]); - } - if ("-r".equals(args[i]) && i < args.length - 1) { - numberOfRecordsPerRequest = Integer.parseInt(args[++i]); - } - if ("-T".equals(args[i]) && i < args.length - 1) { - numberOfThreads = Integer.parseInt(args[++i]); - } - if ("-D".equals(args[i]) && i < args.length - 1) { - deleteTable = Boolean.parseBoolean(args[++i]); - } - } - } - - public String getHost() { - return host; - } - - public int getPort() { - return port; - } - - public String getUser() { - return user; - } - - public String getPassword() { - return password; - } - - public String getDbName() { - return dbName; - } - - public int getKeep() { - return keep; - } - - public int getDays() { - return days; - } - - public String getStbName() { - return stbName; - } - - public String getTbPrefix() { - return tbPrefix; - } - - public int getNumberOfTable() { - return numberOfTable; - } - - public int getNumberOfRecordsPerTable() { - return numberOfRecordsPerTable; - } - - public int getNumberOfThreads() { - return numberOfThreads; - } - - public boolean isDeleteTable() { - return deleteTable; - } - - public int getNumberOfRecordsPerRequest() { - return numberOfRecordsPerRequest; - } -} diff --git a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/task/CreateTableTask.java b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/task/CreateTableTask.java deleted file mode 100644 index 1da2c8647efe0e9204f1a591ba9431a489c91cb0..0000000000000000000000000000000000000000 --- a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/task/CreateTableTask.java +++ /dev/null @@ -1,42 +0,0 @@ -package com.taosdata.example.jdbcTaosdemo.task; - -import com.taosdata.example.jdbcTaosdemo.domain.JdbcTaosdemoConfig; -import com.taosdata.example.jdbcTaosdemo.utils.ConnectionFactory; -import com.taosdata.example.jdbcTaosdemo.utils.SqlSpeller; -import org.apache.log4j.Logger; - -import java.sql.Connection; -import java.sql.SQLException; -import java.sql.Statement; - -public class CreateTableTask implements Runnable { - - private static Logger logger = Logger.getLogger(CreateTableTask.class); - private final JdbcTaosdemoConfig config; - private final int startIndex; - private final int tableNumber; - - public CreateTableTask(JdbcTaosdemoConfig config, int startIndex, int tableNumber) { - this.config = config; - this.startIndex = startIndex; - this.tableNumber = tableNumber; - } - - @Override - public void run() { - try { - Connection connection = 
ConnectionFactory.build(config); - for (int i = startIndex; i < startIndex + tableNumber; i++) { - Statement statement = connection.createStatement(); - String sql = SqlSpeller.createTableSQL(i + 1, config.getDbName(), config.getStbName()); - statement.execute(sql); - statement.close(); - logger.info(">>> " + sql); - } - connection.close(); - } catch (SQLException e) { - logger.error(e.getMessage()); - e.printStackTrace(); - } - } -} diff --git a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/task/InsertTableDatetimeTask.java b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/task/InsertTableDatetimeTask.java deleted file mode 100644 index 4f60c25646573223a9cbfd820c8eb37e4f6f6c8c..0000000000000000000000000000000000000000 --- a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/task/InsertTableDatetimeTask.java +++ /dev/null @@ -1,49 +0,0 @@ -package com.taosdata.example.jdbcTaosdemo.task; - -import com.taosdata.example.jdbcTaosdemo.domain.JdbcTaosdemoConfig; -import com.taosdata.example.jdbcTaosdemo.utils.ConnectionFactory; -import com.taosdata.example.jdbcTaosdemo.utils.SqlSpeller; -import org.apache.log4j.Logger; - -import java.sql.Connection; -import java.sql.SQLException; -import java.sql.Statement; - -public class InsertTableDatetimeTask implements Runnable { - private static Logger logger = Logger.getLogger(InsertTableDatetimeTask.class); - - private final JdbcTaosdemoConfig config; - private final int startTableIndex; - private final int tableNumber; - private final long startDatetime; - private final long finishedDatetime; - - public InsertTableDatetimeTask(JdbcTaosdemoConfig config, int startTableIndex, int tableNumber, long startDatetime, long finishedDatetime) { - this.config = config; - this.startTableIndex = startTableIndex; - this.tableNumber = tableNumber; - this.startDatetime = startDatetime; - this.finishedDatetime = finishedDatetime; - } - - @Override - public void run() { - try { - Connection connection = ConnectionFactory.build(config); - int valuesCount = config.getNumberOfRecordsPerRequest(); - for (long ts = startDatetime; ts < finishedDatetime; ts += valuesCount) { - for (int i = startTableIndex; i < startTableIndex + tableNumber; i++) { - String sql = SqlSpeller.insertBatchSizeRowsSQL(config.getDbName(), config.getTbPrefix(), i + 1, ts, valuesCount); - Statement statement = connection.createStatement(); - statement.execute(sql); - statement.close(); - logger.info(Thread.currentThread().getName() + ">>> " + sql); - } - } - connection.close(); - } catch (SQLException e) { - logger.error(e.getMessage()); - e.printStackTrace(); - } - } -} diff --git a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/task/InsertTableTask.java b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/task/InsertTableTask.java deleted file mode 100644 index 644de52dd3e75a77c2d635a6f5328f186259096c..0000000000000000000000000000000000000000 --- a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/task/InsertTableTask.java +++ /dev/null @@ -1,57 +0,0 @@ -package com.taosdata.example.jdbcTaosdemo.task; - -import com.taosdata.example.jdbcTaosdemo.domain.JdbcTaosdemoConfig; -import com.taosdata.example.jdbcTaosdemo.utils.ConnectionFactory; -import com.taosdata.example.jdbcTaosdemo.utils.SqlSpeller; -import org.apache.log4j.Logger; - -import java.sql.Connection; -import java.sql.SQLException; -import java.sql.Statement; -import 
java.time.Duration; -import java.time.Instant; -import java.time.temporal.ChronoUnit; - -public class InsertTableTask implements Runnable { - private static final Logger logger = Logger.getLogger(InsertTableTask.class); - - private final JdbcTaosdemoConfig config; - private final int startTbIndex; - private final int tableNumber; - private final int recordsNumberPerTable; - - public InsertTableTask(JdbcTaosdemoConfig config, int startTbIndex, int tableNumber, int recordsNumberPerTable) { - this.config = config; - this.startTbIndex = startTbIndex; - this.tableNumber = tableNumber; - this.recordsNumberPerTable = recordsNumberPerTable; - } - - @Override - public void run() { - try { - Connection connection = ConnectionFactory.build(config); - int keep = config.getKeep(); - Instant end = Instant.now(); - Instant start = end.minus(Duration.ofDays(keep - 1)); - long timeGap = ChronoUnit.MILLIS.between(start, end) / (recordsNumberPerTable - 1); - - // iterate insert - for (int j = 0; j < recordsNumberPerTable; j++) { - long ts = start.toEpochMilli() + (j * timeGap); - // insert data into echo table - for (int i = startTbIndex; i < startTbIndex + tableNumber; i++) { - String sql = SqlSpeller.insertBatchSizeRowsSQL(config.getDbName(), config.getTbPrefix(), i + 1, ts, config.getNumberOfRecordsPerRequest()); - logger.info(Thread.currentThread().getName() + ">>> " + sql); - Statement statement = connection.createStatement(); - statement.execute(sql); - statement.close(); - } - } - connection.close(); - } catch (SQLException e) { - logger.error(e.getMessage()); - e.printStackTrace(); - } - } -} diff --git a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/utils/ConnectionFactory.java b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/utils/ConnectionFactory.java deleted file mode 100644 index 52691f4de722db2eb17e7061e09ff0e59a390077..0000000000000000000000000000000000000000 --- a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/utils/ConnectionFactory.java +++ /dev/null @@ -1,32 +0,0 @@ -package com.taosdata.example.jdbcTaosdemo.utils; - -import com.taosdata.example.jdbcTaosdemo.domain.JdbcTaosdemoConfig; -import com.taosdata.jdbc.TSDBDriver; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.SQLException; -import java.util.Properties; - -public class ConnectionFactory { - - public static Connection build(JdbcTaosdemoConfig config) throws SQLException { - return build(config.getHost(), config.getPort(), config.getDbName(), config.getUser(), config.getPassword()); - } - - public static Connection build(String host, int port, String dbName) throws SQLException { - return build(host, port, dbName, "root", "taosdata"); - } - - private static Connection build(String host, int port, String dbName, String user, String password) throws SQLException { - Properties properties = new Properties(); - properties.setProperty(TSDBDriver.PROPERTY_KEY_USER, user); - properties.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, password); - properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); - properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); - properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); - return DriverManager.getConnection("jdbc:TAOS://" + host + ":" + port + "/" + dbName + "", properties); - } - - -} diff --git a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/utils/SqlSpeller.java 
b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/utils/SqlSpeller.java deleted file mode 100644 index b4a79e9eba47cc947d822b645d0ae1f9952f08f0..0000000000000000000000000000000000000000 --- a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/utils/SqlSpeller.java +++ /dev/null @@ -1,126 +0,0 @@ -package com.taosdata.example.jdbcTaosdemo.utils; - -import java.util.Random; - -public class SqlSpeller { - private static final Random random = new Random(System.currentTimeMillis()); - private static final String[] locations = { - "Beijing", "Shanghai", "Guangzhou", "Shenzhen", - "HangZhou", "Tianjin", "Wuhan", "Changsha", "Nanjing", "Xian" - }; - - public static String createDatabaseSQL(String dbName, int keep, int days) { - return "create database if not exists " + dbName + " keep " + keep + " days " + days; - } - - public static String dropDatabaseSQL(String dbName) { - return "drop database if exists " + dbName; - } - - public static String useDatabaseSQL(String dbName) { - return "use " + dbName; - } - - public static String createSuperTableSQL(String superTableName) { - return "create table if not exists " + superTableName + "(ts timestamp, current float, voltage int, phase float) tags(location binary(64), groupId int)"; - } - - public static String dropSuperTableSQL(String dbName, String superTableName) { - return "drop table if exists " + dbName + "." + superTableName; - } - - public static String createTableSQL(int tableIndex, String dbName, String superTableName) { - String location = locations[random.nextInt(locations.length)]; - return "create table d" + tableIndex + " using " + dbName + "." + superTableName + " tags('" + location + "'," + tableIndex + ")"; - } - - public static String insertOneRowSQL(String dbName, String tbPrefix, int tableIndex, long ts) { - float current = 10 + random.nextFloat(); - int voltage = 200 + random.nextInt(20); - float phase = random.nextFloat(); - String sql = "insert into " + dbName + "." + tbPrefix + "" + tableIndex + " " + "values(" + ts + ", " + current + ", " + voltage + ", " + phase + ")"; - return sql; - } - - public static String insertBatchSizeRowsSQL(String dbName, String tbPrefix, int tbIndex, long ts, int valuesCount) { - float current = 10 + random.nextFloat(); - int voltage = 200 + random.nextInt(20); - float phase = random.nextFloat(); - StringBuilder sb = new StringBuilder(); - sb.append("insert into " + dbName + "." + tbPrefix + "" + tbIndex + " " + "values"); - for (int i = 0; i < valuesCount; i++) { - sb.append("(" + (ts + i) + ", " + current + ", " + voltage + ", " + phase + ") "); - } - return sb.toString(); - } - - public static String selectFromTableLimitSQL(String dbName, String tbPrefix, int tbIndex, int limit, int offset) { - return "select * from " + dbName + "." + tbPrefix + "" + tbIndex + " limit " + limit + " offset " + offset; - } - - public static String selectCountFromTableSQL(String dbName, String tbPrefix, int tbIndex) { - return "select count(*) from " + dbName + "." + tbPrefix + "" + tbIndex; - } - - public static String selectAvgMinMaxFromTableSQL(String field, String dbName, String tbPrefix, int tbIndex) { - return "select avg(" + field + "),min(" + field + "),max(" + field + ") from " + dbName + "." + tbPrefix + "" + tbIndex; - } - - public static String selectFromSuperTableLimitSQL(String dbName, String stbName, int limit, int offset) { - return "select * from " + dbName + "." 
+ stbName + " limit " + limit + " offset " + offset; - } - - public static String selectCountFromSuperTableSQL(String dbName, String stableName) { - return "select count(*) from " + dbName + "." + stableName; - } - - public static String selectAvgMinMaxFromSuperTableSQL(String field, String dbName, String stbName) { - return "select avg(" + field + "),min(" + field + "),max(" + field + ") from " + dbName + "." + stbName + ""; - } - - public static String selectLastFromTableSQL(String dbName, String tbPrefix, int tbIndex) { - return "select last(*) from " + dbName + "." + tbPrefix + "" + tbIndex; - } - - //select avg ,max from stb where tag - public static String selectAvgMinMaxFromSuperTableWhere(String field, String dbName, String stbName) { - return "select avg(" + field + "),min(" + field + "),max(" + field + ") from " + dbName + "." + stbName + " where location = '" + locations[random.nextInt(locations.length)] + "'"; - } - - //select last from stb where - public static String selectLastFromSuperTableWhere(String field, String dbName, String stbName) { - return "select last(" + field + ") from " + dbName + "." + stbName + " where location = '" + locations[random.nextInt(locations.length)] + "'"; - } - - public static String selectGroupBy(String field, String dbName, String stbName) { - return "select avg(" + field + ") from " + dbName + "." + stbName + " group by location"; - } - - public static String selectLike(String dbName, String stbName) { - return "select * from " + dbName + "." + stbName + " where location like 'S%'"; - } - - public static String selectLastOneHour(String dbName, String stbName) { - return "select * from " + dbName + "." + stbName + " where ts >= now - 1h"; - } - - public static String selectLastOneDay(String dbName, String stbName) { - return "select * from " + dbName + "." + stbName + " where ts >= now - 1d"; - } - - public static String selectLastOneWeek(String dbName, String stbName) { - return "select * from " + dbName + "." + stbName + " where ts >= now - 1w"; - } - - public static String selectLastOneMonth(String dbName, String stbName) { - return "select * from " + dbName + "." + stbName + " where ts >= now - 1n"; - } - - public static String selectLastOneYear(String dbName, String stbName) { - return "select * from " + dbName + "." 
+ stbName + " where ts >= now - 1y"; - } - - // select group by - // select like - // select ts >= ts<= -} \ No newline at end of file diff --git a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/utils/TimeStampUtil.java b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/utils/TimeStampUtil.java deleted file mode 100644 index 0a345afdd1e45123d889d7ee198cf8efd201176b..0000000000000000000000000000000000000000 --- a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/jdbcTaosdemo/utils/TimeStampUtil.java +++ /dev/null @@ -1,37 +0,0 @@ -package com.taosdata.example.jdbcTaosdemo.utils; - -import java.text.ParseException; -import java.text.SimpleDateFormat; -import java.time.Duration; -import java.time.Instant; -import java.util.Date; - -public class TimeStampUtil { - private static final String datetimeFormat = "yyyy-MM-dd HH:mm:ss.SSS"; - - public static long datetimeToLong(String dateTime) { - SimpleDateFormat sdf = new SimpleDateFormat(datetimeFormat); - try { - return sdf.parse(dateTime).getTime(); - } catch (ParseException e) { - throw new RuntimeException(e); - } - } - - public static String longToDatetime(long time) { - SimpleDateFormat sdf = new SimpleDateFormat(datetimeFormat); - return sdf.format(new Date(time)); - } - - public static void main(String[] args) throws ParseException { - -// Instant now = Instant.now(); -// System.out.println(now); -// Instant years20Ago = now.minus(Duration.ofDays(365)); -// System.out.println(years20Ago); - - - } - - -} diff --git a/tests/examples/JDBC/JDBCDemo/src/main/resources/log4j.properties b/tests/examples/JDBC/JDBCDemo/src/main/resources/log4j.properties deleted file mode 100644 index b445e5f52edf1a2b19c35c1be187716d17cdd7b1..0000000000000000000000000000000000000000 --- a/tests/examples/JDBC/JDBCDemo/src/main/resources/log4j.properties +++ /dev/null @@ -1,21 +0,0 @@ -### 设置### -log4j.rootLogger=debug,stdout,DebugLog,ErrorLog -### 输出信息到控制抬 ### -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.Target=System.out -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=[%-5p] %d{yyyy-MM-dd HH:mm:ss,SSS} method:%l%n%m%n -### 输出DEBUG 级别以上的日志到=logs/error.log ### -log4j.appender.DebugLog=org.apache.log4j.DailyRollingFileAppender -log4j.appender.DebugLog.File=logs/debug.log -log4j.appender.DebugLog.Append=true -log4j.appender.DebugLog.Threshold=DEBUG -log4j.appender.DebugLog.layout=org.apache.log4j.PatternLayout -log4j.appender.DebugLog.layout.ConversionPattern=%-d{yyyy-MM-dd HH:mm:ss} [ %t:%r ] - [ %p ] %m%n -### 输出ERROR 级别以上的日志到=logs/error.log ### -log4j.appender.ErrorLog=org.apache.log4j.DailyRollingFileAppender -log4j.appender.ErrorLog.File=logs/error.log -log4j.appender.ErrorLog.Append=true -log4j.appender.ErrorLog.Threshold=ERROR -log4j.appender.ErrorLog.layout=org.apache.log4j.PatternLayout -log4j.appender.ErrorLog.layout.ConversionPattern=%-d{yyyy-MM-dd HH:mm:ss} [ %t:%r ] - [ %p ] %m%n \ No newline at end of file diff --git a/tests/examples/JDBC/JDBCDemo/src/test/java/com/taosdata/example/jdbcTaosdemo/utils/TimeStampUtilTest.java b/tests/examples/JDBC/JDBCDemo/src/test/java/com/taosdata/example/jdbcTaosdemo/utils/TimeStampUtilTest.java deleted file mode 100644 index f370b2ef6eaa708b061ebf4a7f58f3d31f78f999..0000000000000000000000000000000000000000 --- a/tests/examples/JDBC/JDBCDemo/src/test/java/com/taosdata/example/jdbcTaosdemo/utils/TimeStampUtilTest.java +++ /dev/null @@ -1,52 +0,0 @@ -package 
com.taosdata.example.jdbcTaosdemo.utils; - -import org.junit.Test; - -import java.text.ParseException; -import java.text.SimpleDateFormat; -import java.time.Duration; -import java.time.Instant; -import java.time.format.DateTimeFormatter; -import java.time.temporal.ChronoUnit; -import java.util.Date; - -import static org.junit.Assert.*; - -public class TimeStampUtilTest { - - @Test - public void datetimeToLong() { - final String startTime = "2005-01-01 00:00:00.000"; - long start = TimeStampUtil.datetimeToLong(startTime); - assertEquals(1104508800000l, start); - } - - @Test - public void longToDatetime() { - String datetime = TimeStampUtil.longToDatetime(1510000000000L); - assertEquals("2017-11-07 04:26:40.000", datetime); - } - - @Test - public void getStartDateTime() { - int keep = 365; - - Instant end = Instant.now(); - System.out.println(end.toString()); - System.out.println(end.toEpochMilli()); - - Instant start = end.minus(Duration.ofDays(keep)); - System.out.println(start.toString()); - System.out.println(start.toEpochMilli()); - - int numberOfRecordsPerTable = 10; - long timeGap = ChronoUnit.MILLIS.between(start, end) / (numberOfRecordsPerTable - 1); - System.out.println(timeGap); - - System.out.println("==========================="); - for (int i = 0; i < numberOfRecordsPerTable; i++) { - long ts = start.toEpochMilli() + (i * timeGap); - System.out.println(i + " : " + ts); - } - } -} \ No newline at end of file diff --git a/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/demo/ConnectionPoolDemo.java b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/demo/ConnectionPoolDemo.java index 79c0aacea740dcb6fca9780c7f64872c537c3225..4e33b75bc57aa18ebc0cae166e84401d4357d614 100644 --- a/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/demo/ConnectionPoolDemo.java +++ b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/demo/ConnectionPoolDemo.java @@ -82,12 +82,27 @@ public class ConnectionPoolDemo { init(dataSource); - ExecutorService executor = Executors.newFixedThreadPool(threadCount); - while (true) { - executor.execute(new InsertTask(dataSource, dbName, tableSize, batchSize)); - if (sleep > 0) - TimeUnit.MILLISECONDS.sleep(sleep); + try { + Connection connection = dataSource.getConnection(); + Statement statement = connection.createStatement(); + String sql = "insert into " + dbName + ".t_1 values('2020-01-01 00:00:00.000',12.12,111)"; + int affectRows = statement.executeUpdate(sql); + System.out.println("affectRows >>> " + affectRows); + affectRows = statement.executeUpdate(sql); + System.out.println("affectRows >>> " + affectRows); + statement.close(); + connection.close(); + } catch (SQLException e) { + e.printStackTrace(); } + + +// ExecutorService executor = Executors.newFixedThreadPool(threadCount); +// while (true) { +// executor.execute(new InsertTask(dataSource, dbName, tableSize, batchSize)); +// if (sleep > 0) +// TimeUnit.MILLISECONDS.sleep(sleep); +// } } private static void init(DataSource dataSource) { diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/SpringbootdemoApplication.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/SpringbootdemoApplication.java index 69cd3e0ced2888575d890ffea36407455c4bea7a..f693214567fb3e8780e8ee1a867cf765de0ec8d7 100644 --- a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/SpringbootdemoApplication.java +++ 
b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/jdbc/springbootdemo/SpringbootdemoApplication.java @@ -6,7 +6,7 @@ import org.springframework.boot.autoconfigure.SpringBootApplication; @MapperScan(basePackages = {"com.taosdata.jdbc.springbootdemo.dao"}) @SpringBootApplication -public class SpringbootdemoApplication { +public class cd { public static void main(String[] args) { SpringApplication.run(SpringbootdemoApplication.class, args); diff --git a/tests/examples/JDBC/taosdemo/pom.xml b/tests/examples/JDBC/taosdemo/pom.xml index 5cbf6cb7004566e8d6ea6e16e8b70f289ac2941d..15b22917b6103740ac419e046d5fd8fa79363d25 100644 --- a/tests/examples/JDBC/taosdemo/pom.xml +++ b/tests/examples/JDBC/taosdemo/pom.xml @@ -2,88 +2,107 @@ 4.0.0 - - org.springframework.boot - spring-boot-starter-parent - 2.4.0 - - com.taosdata taosdemo 2.0 taosdemo + jar Demo project for TDengine - 1.8 + 5.3.2 - - + - com.taosdata.jdbc - taos-jdbcdriver - 2.0.14 + org.springframework + spring-context + ${spring.version} - - mysql - mysql-connector-java - 5.1.47 + org.springframework + spring-core + ${spring.version} - - com.baomidou - mybatis-plus-boot-starter - 3.1.2 + org.springframework + spring-beans + ${spring.version} - - log4j - log4j - 1.2.17 + org.springframework + spring-expression + ${spring.version} + + + org.springframework + spring-aop + ${spring.version} + + + org.springframework + spring-aspects + ${spring.version} + + + org.springframework + spring-test + ${spring.version} + test + + + org.springframework + spring-jdbc + ${spring.version} - + + + com.zaxxer + HikariCP + 3.4.5 + + - org.springframework.boot - spring-boot-starter-jdbc + com.taosdata.jdbc + taos-jdbcdriver + 2.0.15 + system + ${project.basedir}/src/main/resources/lib/taos-jdbcdriver-2.0.15-dist.jar + - org.springframework.boot - spring-boot-starter-thymeleaf + com.alibaba + fastjson + 1.2.75 + - org.springframework.boot - spring-boot-starter-web + mysql + mysql-connector-java + 5.1.47 + test + - org.mybatis.spring.boot - mybatis-spring-boot-starter - 2.1.4 + log4j + log4j + 1.2.17 + junit junit 4.12 test - - org.springframework.boot - spring-boot-devtools - runtime - true - + org.projectlombok lombok - true - - - org.springframework.boot - spring-boot-starter-test - test + 1.18.16 + provided @@ -94,6 +113,7 @@ **/*.properties **/*.xml + **/*.jar true @@ -108,10 +128,41 @@ - org.springframework.boot - spring-boot-maven-plugin + org.apache.maven.plugins + maven-compiler-plugin + + 8 + 8 + + + + + org.apache.maven.plugins + maven-assembly-plugin + 3.1.0 + + + + + com.taosdata.taosdemo.TaosDemoApplication + + + + jar-with-dependencies + + + + + make-assembly + package + + single + + + + diff --git a/tests/examples/JDBC/taosdemo/readme.md b/tests/examples/JDBC/taosdemo/readme.md new file mode 100644 index 0000000000000000000000000000000000000000..a4b6e297691c4981fc36a06679d085c183e890a1 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/readme.md @@ -0,0 +1,3 @@ +需求: +1. 可以读lowa的配置文件 +2. 
支持对JNI方式和Restful方式的taos-driver \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosDemoApplication.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosDemoApplication.java new file mode 100644 index 0000000000000000000000000000000000000000..b9a22a1ef75962159bd33a8e525898a69b67a911 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosDemoApplication.java @@ -0,0 +1,112 @@ +package com.taosdata.taosdemo; + +import com.taosdata.taosdemo.components.DataSourceFactory; +import com.taosdata.taosdemo.components.JdbcTaosdemoConfig; +import com.taosdata.taosdemo.domain.SuperTableMeta; +import com.taosdata.taosdemo.service.DatabaseService; +import com.taosdata.taosdemo.service.SubTableService; +import com.taosdata.taosdemo.service.SuperTableService; +import com.taosdata.taosdemo.service.data.SuperTableMetaGenerator; +import org.apache.log4j.Logger; + +import javax.sql.DataSource; +import java.io.IOException; +import java.time.Duration; +import java.time.Instant; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; + +public class TaosDemoApplication { + + private static Logger logger = Logger.getLogger(TaosDemoApplication.class); + + public static void main(String[] args) throws IOException { + // 读配置参数 + JdbcTaosdemoConfig config = new JdbcTaosdemoConfig(args); + boolean isHelp = Arrays.asList(args).contains("--help"); + if (isHelp || config.host == null || config.host.isEmpty()) { +// if (isHelp) { + JdbcTaosdemoConfig.printHelp(); + System.exit(0); + } + // 初始化 + final DataSource dataSource = DataSourceFactory.getInstance(config.host, config.port, config.user, config.password); + final DatabaseService databaseService = new DatabaseService(dataSource); + final SuperTableService superTableService = new SuperTableService(dataSource); + final SubTableService subTableService = new SubTableService(dataSource); + // 创建数据库 + long start = System.currentTimeMillis(); + Map databaseParam = new HashMap<>(); + databaseParam.put("database", config.database); + databaseParam.put("keep", Integer.toString(config.keep)); + databaseParam.put("days", Integer.toString(config.days)); + databaseParam.put("replica", Integer.toString(config.replica)); + //TODO: other database parameters + databaseService.createDatabase(databaseParam); + databaseService.useDatabase(config.database); + long end = System.currentTimeMillis(); + logger.info(">>> create database time cost : " + (end - start) + " ms."); + /**********************************************************************************/ + // 构造超级表的meta + SuperTableMeta superTableMeta; + // create super table + if (config.superTableSQL != null) { + // use a sql to create super table + superTableMeta = SuperTableMetaGenerator.generate(config.superTableSQL); + if (config.database != null && !config.database.isEmpty()) + superTableMeta.setDatabase(config.database); + } else if (config.numOfFields == 0) { + String sql = "create table " + config.database + "." 
+ config.superTable + " (ts timestamp, temperature float, humidity int) tags(location nchar(64), groupId int)"; + superTableMeta = SuperTableMetaGenerator.generate(sql); + } else { + // create super table with specified field size and tag size + superTableMeta = SuperTableMetaGenerator.generate(config.database, config.superTable, config.numOfFields, config.prefixOfFields, config.numOfTags, config.prefixOfTags); + } + /**********************************************************************************/ + // 建表 + start = System.currentTimeMillis(); + if (config.doCreateTable) { + superTableService.drop(superTableMeta.getDatabase(), superTableMeta.getName()); + superTableService.create(superTableMeta); + if (!config.autoCreateTable) { + // 批量建子表 + subTableService.createSubTable(superTableMeta, config.numOfTables, config.prefixOfTable, config.numOfThreadsForCreate); + } + } + end = System.currentTimeMillis(); + logger.error(">>> create table time cost : " + (end - start) + " ms."); + /**********************************************************************************/ + // 插入 + long tableSize = config.numOfTables; + int threadSize = config.numOfThreadsForInsert; + long startTime = getProperStartTime(config.startTime, config.keep); + + if (tableSize < threadSize) + threadSize = (int) tableSize; + long gap = (long) Math.ceil((0.0d + tableSize) / threadSize); + + start = System.currentTimeMillis(); + // multi threads to insert + int affectedRows = subTableService.insertMultiThreads(superTableMeta, threadSize, tableSize, startTime, gap, config); + end = System.currentTimeMillis(); + logger.error("insert " + affectedRows + " rows, time cost: " + (end - start) + " ms"); + /**********************************************************************************/ + // 删除表 + if (config.dropTable) { + superTableService.drop(config.database, config.superTable); + } + System.exit(0); + } + + private static long getProperStartTime(long startTime, int keep) { + Instant now = Instant.now(); + long earliest = now.minus(Duration.ofDays(keep - 1)).toEpochMilli(); + if (startTime == 0 || startTime < earliest) { + startTime = earliest; + } + return startTime; + } + + +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosdemoApplication.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosdemoApplication.java deleted file mode 100644 index db1b20527d88f5bca24ed5c6a3b477fa3a71bdf1..0000000000000000000000000000000000000000 --- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosdemoApplication.java +++ /dev/null @@ -1,15 +0,0 @@ -package com.taosdata.taosdemo; - -import org.mybatis.spring.annotation.MapperScan; -import org.springframework.boot.SpringApplication; -import org.springframework.boot.autoconfigure.SpringBootApplication; - -@MapperScan(basePackages = {"com.taosdata.taosdemo.mapper"}) -@SpringBootApplication -public class TaosdemoApplication { - - public static void main(String[] args) { - SpringApplication.run(TaosdemoApplication.class, args); - } - -} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/components/DataSourceFactory.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/components/DataSourceFactory.java new file mode 100644 index 0000000000000000000000000000000000000000..c96d6f8bed68e9bb67d959ddb1d7531b4cbadeb3 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/components/DataSourceFactory.java @@ -0,0 +1,63 @@ +package 
com.taosdata.taosdemo.components; + +import com.zaxxer.hikari.HikariConfig; +import com.zaxxer.hikari.HikariDataSource; +import org.springframework.stereotype.Component; + +import javax.sql.DataSource; +import java.io.IOException; +import java.io.InputStream; +import java.util.Properties; + +@Component +public class DataSourceFactory { + + private static DataSource instance; + + public static DataSource getInstance(String host, int port, String user, String password) throws IOException { + if (instance == null) { + synchronized (DataSourceFactory.class) { + if (instance == null) { + InputStream is = DataSourceFactory.class.getClassLoader().getResourceAsStream("application.properties"); + Properties properties = new Properties(); + properties.load(is); + + HikariConfig config = new HikariConfig(); + + if (properties.containsKey("jdbc.driver")) { +// String driverName = properties.getProperty("jdbc.driver"); +// System.out.println(">>> load driver : " + driverName); +// try { +// Class.forName(driverName); +// } catch (ClassNotFoundException e) { +// e.printStackTrace(); +// } + config.setDriverClassName(properties.getProperty("jdbc.driver")); + } else { + config.setDriverClassName("com.taosdata.jdbc.TSDBDriver"); + } + if ("com.taosdata.jdbc.rs.RestfulDriver".equalsIgnoreCase(properties.getProperty("jdbc.driver"))) + config.setJdbcUrl("jdbc:TAOS-RS://" + host + ":6041/?charset=UTF-8&locale=en_US.UTF-8&timezone=UTC-8"); + else + config.setJdbcUrl("jdbc:TAOS://" + host + ":" + port + "/?charset=UTF-8&locale=en_US.UTF-8&timezone=UTC-8"); + config.setUsername(user); + config.setPassword(password); + // maximum-pool-size + if (properties.containsKey("hikari.maximum-pool-size")) + config.setMaximumPoolSize(Integer.parseInt(properties.getProperty("hikari.maximum-pool-size"))); + else + config.setMaximumPoolSize(500); + // minimum-idle + if (properties.containsKey("hikari.minimum-idle")) + config.setMinimumIdle(Integer.parseInt(properties.getProperty("hikari.minimum-idle"))); + else + config.setMinimumIdle(100); + config.setMaxLifetime(0); + instance = new HikariDataSource(config); + } + } + } + return instance; + } + +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/JdbcTaosdemoConfig.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/components/JdbcTaosdemoConfig.java similarity index 83% rename from tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/JdbcTaosdemoConfig.java rename to tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/components/JdbcTaosdemoConfig.java index 4e6f64033012ca402d4a07787ef71a0525b25acb..971c10dee2889543e95a70b244ea3cda462df3a6 100644 --- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/JdbcTaosdemoConfig.java +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/components/JdbcTaosdemoConfig.java @@ -1,4 +1,6 @@ -package com.taosdata.taosdemo.utils; +package com.taosdata.taosdemo.components; + +import com.taosdata.taosdemo.utils.TimeStampUtil; public final class JdbcTaosdemoConfig { // instance @@ -7,10 +9,14 @@ public final class JdbcTaosdemoConfig { public String user = "root"; //user public String password = "taosdata"; //password // database - public String database = "test"; //database + public String database = "jdbcdb"; //database public int keep = 3650; //keep public int days = 30; //days public int replica = 1; //replica + public int blocks = 16; + public int cache = 8; + public String precision = "ms"; + //super table public 
boolean doCreateTable = true; public String superTable = "weather"; //super table name @@ -20,19 +26,19 @@ public final class JdbcTaosdemoConfig { public int numOfTags; public String superTableSQL; //sub table - public String tablePrefix = "t"; - public int numOfTables = 100; - public int numOfThreadsForCreate = 1; + public String prefixOfTable = "t"; // insert task public boolean autoCreateTable; - public int numOfRowsPerTable = 100; + public long numOfTables = 10; + public long numOfRowsPerTable = 10; + public int numOfTablesPerSQL = 1; + public int numOfValuesPerSQL = 1; + public int numOfThreadsForCreate = 1; public int numOfThreadsForInsert = 1; - public int numOfTablesPerSQL = 10; - public int numOfValuesPerSQL = 10; public long startTime; - public long timeGap; - public int sleep = 0; - public int order = 0; + public long timeGap = 1; + public int frequency; + public int order; public int rate = 10; public long range = 1000l; // select task @@ -48,10 +54,14 @@ public final class JdbcTaosdemoConfig { System.out.println("-user The TDengine user name to use when connecting to the server. Default is 'root'"); System.out.println("-password The password to use when connecting to the server.Default is 'taosdata'"); // database - System.out.println("-database Destination database. Default is 'test'"); + System.out.println("-database Destination database. Default is 'jdbcdb'"); System.out.println("-keep database keep parameter. Default is 3650"); System.out.println("-days database days parameter. Default is 30"); System.out.println("-replica database replica parameter. Default 1, min: 1, max: 3"); + System.out.println("-blocks database blocks parameter. Default is 16"); + System.out.println("-cache database cache parameter. Default is 8"); + System.out.println("-precision database precision parameter. Default is ms"); + // super table System.out.println("-doCreateTable do create super table and sub table, true or false, Default true"); System.out.println("-superTable super table name. Default 'weather'"); @@ -63,7 +73,7 @@ public final class JdbcTaosdemoConfig { " Default is 'create table weather(ts timestamp, temperature float, humidity int) tags(location nchar(64), groupId int). \n" + " if you use this parameter, the numOfFields and numOfTags will be invalid'"); // sub table - System.out.println("-tablePrefix The prefix of sub tables. Default is 't'"); + System.out.println("-prefixOfTable The prefix of sub tables. Default is 't'"); System.out.println("-numOfTables The number of tables. Default is 1"); System.out.println("-numOfThreadsForCreate The number of thread during create sub table. Default is 1"); // insert task @@ -74,11 +84,10 @@ public final class JdbcTaosdemoConfig { System.out.println("-numOfValuesPerSQL The number of value per SQL. Default is 1"); System.out.println("-startTime start time for insert task, The format is \"yyyy-MM-dd HH:mm:ss.SSS\"."); System.out.println("-timeGap the number of time gap. Default is 1000 ms"); - System.out.println("-sleep The number of milliseconds for sleep after each insert. default is 0"); + System.out.println("-frequency the number of records per second inserted into one table. default is 0, do not control frequency"); System.out.println("-order Insert mode--0: In order, 1: Out of order. Default is in order"); System.out.println("-rate The proportion of data out of order. effective only if order is 1. min 0, max 100, default is 10"); System.out.println("-range The range of data out of order. effective only if order is 1. 
default is 1000 ms"); - // query task // System.out.println("-sqlFile The select sql file"); // drop task @@ -120,6 +129,15 @@ public final class JdbcTaosdemoConfig { if ("-replica".equals(args[i]) && i < args.length - 1) { replica = Integer.parseInt(args[++i]); } + if ("-blocks".equals(args[i]) && i < args.length - 1) { + blocks = Integer.parseInt(args[++i]); + } + if ("-cache".equals(args[i]) && i < args.length - 1) { + cache = Integer.parseInt(args[++i]); + } + if ("-precision".equals(args[i]) && i < args.length - 1) { + precision = args[++i]; + } // super table if ("-doCreateTable".equals(args[i]) && i < args.length - 1) { doCreateTable = Boolean.parseBoolean(args[++i]); @@ -143,11 +161,11 @@ public final class JdbcTaosdemoConfig { superTableSQL = args[++i]; } // sub table - if ("-tablePrefix".equals(args[i]) && i < args.length - 1) { - tablePrefix = args[++i]; + if ("-prefixOfTable".equals(args[i]) && i < args.length - 1) { + prefixOfTable = args[++i]; } if ("-numOfTables".equals(args[i]) && i < args.length - 1) { - numOfTables = Integer.parseInt(args[++i]); + numOfTables = Long.parseLong(args[++i]); } if ("-autoCreateTable".equals(args[i]) && i < args.length - 1) { autoCreateTable = Boolean.parseBoolean(args[++i]); @@ -157,7 +175,7 @@ public final class JdbcTaosdemoConfig { } // insert task if ("-numOfRowsPerTable".equals(args[i]) && i < args.length - 1) { - numOfRowsPerTable = Integer.parseInt(args[++i]); + numOfRowsPerTable = Long.parseLong(args[++i]); } if ("-numOfThreadsForInsert".equals(args[i]) && i < args.length - 1) { numOfThreadsForInsert = Integer.parseInt(args[++i]); @@ -174,8 +192,8 @@ public final class JdbcTaosdemoConfig { if ("-timeGap".equals(args[i]) && i < args.length - 1) { timeGap = Long.parseLong(args[++i]); } - if ("-sleep".equals(args[i]) && i < args.length - 1) { - sleep = Integer.parseInt(args[++i]); + if ("-frequency".equals(args[i]) && i < args.length - 1) { + frequency = Integer.parseInt(args[++i]); } if ("-order".equals(args[i]) && i < args.length - 1) { order = Integer.parseInt(args[++i]); @@ -197,8 +215,4 @@ public final class JdbcTaosdemoConfig { } } - public static void main(String[] args) { - JdbcTaosdemoConfig config = new JdbcTaosdemoConfig(args); - } - } diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/components/JsonConfig.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/components/JsonConfig.java new file mode 100644 index 0000000000000000000000000000000000000000..1c44610095f4b383f82a74dfdc11030a28afb246 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/components/JsonConfig.java @@ -0,0 +1,39 @@ +package com.taosdata.taosdemo.components; + +import com.alibaba.fastjson.JSONObject; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; + +public class JsonConfig { + + public static void main(String[] args) { + + JsonConfig config = new JsonConfig(); + String str = config.read("insert.json"); + JSONObject jsonObject = JSONObject.parseObject(str); + System.out.println(jsonObject); + + } + + private String read(String fileName) { + try { + BufferedReader reader = new BufferedReader( + new InputStreamReader(JsonConfig.class.getClassLoader().getResourceAsStream(fileName)) + ); + StringBuilder sb = new StringBuilder(); + String line = null; + while ((line = reader.readLine()) != null) { + sb.append(line); + } + return sb.toString(); + } catch (IOException e) { + e.printStackTrace(); + } + + return fileName; + } + + +} \ No 
newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/components/TaosDemoCommandLineRunner.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/components/TaosDemoCommandLineRunner.java deleted file mode 100644 index e58c68f7a5f60b57f064075c4004267918526b15..0000000000000000000000000000000000000000 --- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/components/TaosDemoCommandLineRunner.java +++ /dev/null @@ -1,174 +0,0 @@ -package com.taosdata.taosdemo.components; - -import com.taosdata.taosdemo.domain.*; -import com.taosdata.taosdemo.service.DatabaseService; -import com.taosdata.taosdemo.service.SubTableService; -import com.taosdata.taosdemo.service.SuperTableService; -import com.taosdata.taosdemo.service.data.SubTableMetaGenerator; -import com.taosdata.taosdemo.service.data.SubTableValueGenerator; -import com.taosdata.taosdemo.service.data.SuperTableMetaGenerator; -import com.taosdata.taosdemo.utils.JdbcTaosdemoConfig; -import org.apache.log4j.Logger; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.boot.CommandLineRunner; -import org.springframework.stereotype.Component; - -import java.util.*; -import java.util.concurrent.TimeUnit; - - -@Component -public class TaosDemoCommandLineRunner implements CommandLineRunner { - - private static Logger logger = Logger.getLogger(TaosDemoCommandLineRunner.class); - @Autowired - private DatabaseService databaseService; - @Autowired - private SuperTableService superTableService; - @Autowired - private SubTableService subTableService; - - private SuperTableMeta superTableMeta; - private List subTableMetaList; - private List subTableValueList; - private List> dataList; - - - @Override - public void run(String... args) throws Exception { - // 读配置参数 - JdbcTaosdemoConfig config = new JdbcTaosdemoConfig(args); - boolean isHelp = Arrays.asList(args).contains("--help"); - if (isHelp) { - JdbcTaosdemoConfig.printHelp(); - System.exit(0); - } - // 准备数据 - prepareData(config); - // 创建数据库 - createDatabaseTask(config); - // 建表 - createTableTask(config); - // 插入 - insertTask(config); - // 查询: 1. 生成查询语句, 2. 执行查询 - // 删除表 - if (config.dropTable) { - superTableService.drop(config.database, config.superTable); - } - - System.exit(0); - } - - private void createDatabaseTask(JdbcTaosdemoConfig config) { - long start = System.currentTimeMillis(); - - Map databaseParam = new HashMap<>(); - databaseParam.put("database", config.database); - databaseParam.put("keep", Integer.toString(config.keep)); - databaseParam.put("days", Integer.toString(config.days)); - databaseParam.put("replica", Integer.toString(config.replica)); - //TODO: other database parameters - databaseService.dropDatabase(config.database); - databaseService.createDatabase(databaseParam); - databaseService.useDatabase(config.database); - - long end = System.currentTimeMillis(); - logger.info(">>> insert time cost : " + (end - start) + " ms."); - } - - // 建超级表,三种方式:1. 指定SQL,2. 指定field和tags的个数,3. 
默认 - private void createTableTask(JdbcTaosdemoConfig config) { - long start = System.currentTimeMillis(); - if (config.doCreateTable) { - superTableService.create(superTableMeta); - // 批量建子表 - subTableService.createSubTable(subTableMetaList, config.numOfThreadsForCreate); - } - long end = System.currentTimeMillis(); - logger.info(">>> create table time cost : " + (end - start) + " ms."); - } - - private void insertTask(JdbcTaosdemoConfig config) { - long start = System.currentTimeMillis(); - - int numOfThreadsForInsert = config.numOfThreadsForInsert; - int sleep = config.sleep; - if (config.autoCreateTable) { - // 批量插入,自动建表 - dataList.stream().forEach(subTableValues -> { - subTableService.insertAutoCreateTable(subTableValues, numOfThreadsForInsert); - sleep(sleep); - }); - } else { - dataList.stream().forEach(subTableValues -> { - subTableService.insert(subTableValues, numOfThreadsForInsert); - sleep(sleep); - }); - } - long end = System.currentTimeMillis(); - logger.info(">>> insert time cost : " + (end - start) + " ms."); - } - - private void prepareData(JdbcTaosdemoConfig config) { - long start = System.currentTimeMillis(); - // 超级表的meta - superTableMeta = createSupertable(config); - // 子表的meta - subTableMetaList = SubTableMetaGenerator.generate(superTableMeta, config.numOfTables, config.tablePrefix); - // 子表的data - subTableValueList = SubTableValueGenerator.generate(subTableMetaList, config.numOfRowsPerTable, config.startTime, config.timeGap); - // 如果有乱序,给数据搞乱 - if (config.order != 0) { - SubTableValueGenerator.disrupt(subTableValueList, config.rate, config.range); - } - // 分割数据 - int numOfTables = config.numOfTables; - int numOfTablesPerSQL = config.numOfTablesPerSQL; - int numOfRowsPerTable = config.numOfRowsPerTable; - int numOfValuesPerSQL = config.numOfValuesPerSQL; - dataList = SubTableValueGenerator.split(subTableValueList, numOfTables, numOfTablesPerSQL, numOfRowsPerTable, numOfValuesPerSQL); - long end = System.currentTimeMillis(); - logger.info(">>> prepare data time cost : " + (end - start) + " ms."); - } - - private SuperTableMeta createSupertable(JdbcTaosdemoConfig config) { - SuperTableMeta tableMeta; - // create super table - logger.info(">>> create super table <<<"); - if (config.superTableSQL != null) { - // use a sql to create super table - tableMeta = SuperTableMetaGenerator.generate(config.superTableSQL); - } else if (config.numOfFields == 0) { - // default sql = "create table test.weather (ts timestamp, temperature float, humidity int) tags(location nchar(64), groupId int)"; - SuperTableMeta superTableMeta = new SuperTableMeta(); - superTableMeta.setDatabase(config.database); - superTableMeta.setName(config.superTable); - List fields = new ArrayList<>(); - fields.add(new FieldMeta("ts", "timestamp")); - fields.add(new FieldMeta("temperature", "float")); - fields.add(new FieldMeta("humidity", "int")); - superTableMeta.setFields(fields); - List tags = new ArrayList<>(); - tags.add(new TagMeta("location", "nchar(64)")); - tags.add(new TagMeta("groupId", "int")); - superTableMeta.setTags(tags); - return superTableMeta; - } else { - // create super table with specified field size and tag size - tableMeta = SuperTableMetaGenerator.generate(config.database, config.superTable, config.numOfFields, config.prefixOfFields, config.numOfTags, config.prefixOfTags); - } - return tableMeta; - } - - private static void sleep(int sleep) { - if (sleep <= 0) - return; - try { - TimeUnit.MILLISECONDS.sleep(sleep); - } catch (InterruptedException e) { - e.printStackTrace(); - } - } - -} 
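// Editor's note (illustrative only, not part of this patch): a minimal sketch of how the
// Spring-free components introduced in this patch (DataSourceFactory, DatabaseService,
// SuperTableService, SubTableService, SuperTableMetaGenerator) replace the deleted
// CommandLineRunner above, mirroring the flow of the new TaosDemoApplication. The host,
// credentials, table counts and super-table SQL below are placeholder assumptions, and
// DataSourceFactory expects an application.properties file on the classpath.
package com.taosdata.taosdemo;

import com.taosdata.taosdemo.components.DataSourceFactory;
import com.taosdata.taosdemo.domain.SuperTableMeta;
import com.taosdata.taosdemo.service.DatabaseService;
import com.taosdata.taosdemo.service.SubTableService;
import com.taosdata.taosdemo.service.SuperTableService;
import com.taosdata.taosdemo.service.data.SuperTableMetaGenerator;

import javax.sql.DataSource;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

public class TaosDemoWiringSketch {

    public static void main(String[] args) throws IOException {
        // HikariCP-backed pool over the TDengine JDBC driver, as built by DataSourceFactory
        DataSource dataSource = DataSourceFactory.getInstance("127.0.0.1", 6030, "root", "taosdata");

        DatabaseService databaseService = new DatabaseService(dataSource);
        SuperTableService superTableService = new SuperTableService(dataSource);
        SubTableService subTableService = new SubTableService(dataSource);

        // create the target database with the same keep/days/replica defaults the demo config uses
        Map<String, String> databaseParam = new HashMap<>();
        databaseParam.put("database", "jdbcdb");
        databaseParam.put("keep", "3650");
        databaseParam.put("days", "30");
        databaseParam.put("replica", "1");
        databaseService.createDatabase(databaseParam);
        databaseService.useDatabase("jdbcdb");

        // derive the super table meta from a SQL statement, create it, then create 10 sub tables
        SuperTableMeta meta = SuperTableMetaGenerator.generate(
                "create table jdbcdb.weather (ts timestamp, temperature float, humidity int)"
                        + " tags(location nchar(64), groupId int)");
        superTableService.create(meta);
        subTableService.createSubTable(meta, 10, "t", 1);
    }
}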
diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/DatabaseController.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/DatabaseController.java deleted file mode 100644 index 1cf1463f0ab4f2a8c67258f512e407d54c1d869e..0000000000000000000000000000000000000000 --- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/DatabaseController.java +++ /dev/null @@ -1,40 +0,0 @@ -package com.taosdata.taosdemo.controller; - -import com.taosdata.taosdemo.service.DatabaseService; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.web.bind.annotation.*; - -import java.util.Map; - -@RestController -@RequestMapping -public class DatabaseController { - - @Autowired - private DatabaseService databaseService; - - /** - * create database - ***/ - @PostMapping - public int create(@RequestBody Map map) { - return databaseService.createDatabase(map); - } - - - /** - * drop database - **/ - @DeleteMapping("/{dbname}") - public int delete(@PathVariable("dbname") String dbname) { - return databaseService.dropDatabase(dbname); - } - - /** - * use database - **/ - @GetMapping("/{dbname}") - public int use(@PathVariable("dbname") String dbname) { - return databaseService.useDatabase(dbname); - } -} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/InsertController.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/InsertController.java deleted file mode 100644 index 788f68a30af5ac7dd4c8c27f4cfe5f1a6f2b440b..0000000000000000000000000000000000000000 --- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/InsertController.java +++ /dev/null @@ -1,17 +0,0 @@ -package com.taosdata.taosdemo.controller; - -import org.springframework.web.bind.annotation.RestController; - -@RestController -public class InsertController { - - //TODO:多线程写一张表, thread = 10, table = 1 - //TODO:一个批次写多张表, insert into t1 using weather values() t2 using weather values() - //TODO:插入的频率, - //TODO:指定一张表内的records数量 - //TODO:是否乱序, - //TODO:乱序的比例,乱序的范围 - //TODO:先建表,自动建表 - //TODO:一个批次写多张表 - -} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/SubTableController.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/SubTableController.java deleted file mode 100644 index 797c3708d3357c19bf5f64046bb794b66786e080..0000000000000000000000000000000000000000 --- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/SubTableController.java +++ /dev/null @@ -1,45 +0,0 @@ -package com.taosdata.taosdemo.controller; - -import com.taosdata.taosdemo.domain.TableValue; -import com.taosdata.taosdemo.service.SuperTableService; -import com.taosdata.taosdemo.service.TableService; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.web.bind.annotation.PathVariable; -import org.springframework.web.bind.annotation.PostMapping; -import org.springframework.web.bind.annotation.RequestBody; -import org.springframework.web.bind.annotation.RestController; - -@RestController -public class SubTableController { - - @Autowired - private TableService tableService; - @Autowired - private SuperTableService superTableService; - - //TODO: 使用supertable创建一个子表 - - //TODO:使用supertable创建多个子表 - - //TODO:使用supertable多线程创建子表 - - //TODO:使用supertable多线程创建子表,指定子表的name_prefix,子表的数量,使用线程的个数 - - /** - * 创建表,超级表或者普通表 - **/ - - - /** - * 创建超级表的子表 - **/ - 
@PostMapping("/{database}/{superTable}") - public int createTable(@PathVariable("database") String database, - @PathVariable("superTable") String superTable, - @RequestBody TableValue tableMetadta) { - tableMetadta.setDatabase(database); - return 0; - } - - -} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/SuperTableController.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/SuperTableController.java deleted file mode 100644 index cf53c1440fa710c3bea510973ca2efecb0705828..0000000000000000000000000000000000000000 --- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/SuperTableController.java +++ /dev/null @@ -1,26 +0,0 @@ -package com.taosdata.taosdemo.controller; - -import com.taosdata.taosdemo.domain.SuperTableMeta; -import com.taosdata.taosdemo.service.SuperTableService; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.web.bind.annotation.PathVariable; -import org.springframework.web.bind.annotation.PostMapping; -import org.springframework.web.bind.annotation.RequestBody; - -public class SuperTableController { - @Autowired - private SuperTableService superTableService; - - - @PostMapping("/{database}") - public int createTable(@PathVariable("database") String database, @RequestBody SuperTableMeta tableMetadta) { - tableMetadta.setDatabase(database); - return superTableService.create(tableMetadta); - } - - //TODO: 删除超级表 - - //TODO:查询超级表 - - //TODO:统计查询表 -} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/TableController.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/TableController.java deleted file mode 100644 index dbdd978e746eb89d8927617ae4026cdcf544559b..0000000000000000000000000000000000000000 --- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/TableController.java +++ /dev/null @@ -1,11 +0,0 @@ -package com.taosdata.taosdemo.controller; - -public class TableController { - - //TODO:创建普通表,create table(ts timestamp, temperature float) - - //TODO:创建普通表,指定表的列数,包括第一列timestamp - - //TODO:创建普通表,指定表每列的name和type - -} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/DatabaseMapper.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/DatabaseMapper.java new file mode 100644 index 0000000000000000000000000000000000000000..56e38d2bfce8ba5801d0f8dc48093d198852fbca --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/DatabaseMapper.java @@ -0,0 +1,23 @@ +package com.taosdata.taosdemo.dao; + +import java.util.Map; + +public interface DatabaseMapper { + + // create database if not exists XXX + void createDatabase(String dbname); + + // drop database if exists XXX + void dropDatabase(String dbname); + + // create database if not exists XXX keep XX days XX replica XX + void createDatabaseWithParameters(Map map); + + // use XXX + void useDatabase(String dbname); + + //TODO: alter database + + //TODO: show database + +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/DatabaseMapperImpl.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/DatabaseMapperImpl.java new file mode 100644 index 0000000000000000000000000000000000000000..69bae160f6285bb9d1a94357cc2279ebfca22eca --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/DatabaseMapperImpl.java @@ -0,0 +1,47 @@ +package 
com.taosdata.taosdemo.dao; + +import com.taosdata.taosdemo.utils.SqlSpeller; +import org.apache.log4j.Logger; +import org.springframework.jdbc.core.JdbcTemplate; + +import javax.sql.DataSource; +import java.util.Map; + +public class DatabaseMapperImpl implements DatabaseMapper { + private static final Logger logger = Logger.getLogger(DatabaseMapperImpl.class); + + private final JdbcTemplate jdbcTemplate; + + public DatabaseMapperImpl(DataSource dataSource) { + this.jdbcTemplate = new JdbcTemplate(dataSource); + } + + + @Override + public void createDatabase(String dbname) { + String sql = "create database if not exists " + dbname; + jdbcTemplate.execute(sql); + logger.info("SQL >>> " + sql); + } + + @Override + public void dropDatabase(String dbname) { + String sql = "drop database if exists " + dbname; + jdbcTemplate.update(sql); + logger.info("SQL >>> " + sql); + } + + @Override + public void createDatabaseWithParameters(Map map) { + String sql = SqlSpeller.createDatabase(map); + jdbcTemplate.execute(sql); + logger.info("SQL >>> " + sql); + } + + @Override + public void useDatabase(String dbname) { + String sql = "use " + dbname; + jdbcTemplate.execute(sql); + logger.info("SQL >>> " + sql); + } +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SubTableMapper.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SubTableMapper.java similarity index 67% rename from tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SubTableMapper.java rename to tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SubTableMapper.java index d23473ba313899eee04f07f5c06177a2d55f6295..e0ddd220c19066afd1cc332f007a82e2fdab2b07 100644 --- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SubTableMapper.java +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SubTableMapper.java @@ -1,8 +1,7 @@ -package com.taosdata.taosdemo.mapper; +package com.taosdata.taosdemo.dao; import com.taosdata.taosdemo.domain.SubTableMeta; import com.taosdata.taosdemo.domain.SubTableValue; -import org.apache.ibatis.annotations.Param; import org.springframework.stereotype.Repository; import java.util.List; @@ -11,7 +10,7 @@ import java.util.List; public interface SubTableMapper { // 创建:子表 - int createUsingSuperTable(SubTableMeta subTableMeta); + void createUsingSuperTable(SubTableMeta subTableMeta); // 插入:一张子表多个values int insertOneTableMultiValues(SubTableValue subTableValue); @@ -20,10 +19,10 @@ public interface SubTableMapper { int insertOneTableMultiValuesUsingSuperTable(SubTableValue subTableValue); // 插入:多张表多个values - int insertMultiTableMultiValues(@Param("tables") List tables); + int insertMultiTableMultiValues(List tables); // 插入:多张表多个values,自动建表 - int insertMultiTableMultiValuesUsingSuperTable(@Param("tables") List tables); + int insertMultiTableMultiValuesUsingSuperTable(List tables); // diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SubTableMapperImpl.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SubTableMapperImpl.java new file mode 100644 index 0000000000000000000000000000000000000000..e3a6691430f8b5232660ed6edeab01318db30ef1 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SubTableMapperImpl.java @@ -0,0 +1,81 @@ +package com.taosdata.taosdemo.dao; + +import com.taosdata.taosdemo.domain.SubTableMeta; +import com.taosdata.taosdemo.domain.SubTableValue; +import 
com.taosdata.taosdemo.utils.SqlSpeller; +import org.apache.log4j.Logger; +import org.springframework.jdbc.core.JdbcTemplate; + +import javax.sql.DataSource; +import java.util.List; + +public class SubTableMapperImpl implements SubTableMapper { + + private static final Logger logger = Logger.getLogger(SubTableMapperImpl.class); + private final JdbcTemplate jdbcTemplate; + + public SubTableMapperImpl(DataSource dataSource) { + this.jdbcTemplate = new JdbcTemplate(dataSource); + } + + @Override + public void createUsingSuperTable(SubTableMeta subTableMeta) { + String sql = SqlSpeller.createTableUsingSuperTable(subTableMeta); + logger.info("SQL >>> " + sql); + jdbcTemplate.execute(sql); + } + + @Override + public int insertOneTableMultiValues(SubTableValue subTableValue) { + String sql = SqlSpeller.insertOneTableMultiValues(subTableValue); + logger.info("SQL >>> " + sql); + + int affectRows = 0; + try { + affectRows = jdbcTemplate.update(sql); + } catch (Exception e) { + e.printStackTrace(); + } + return affectRows; + } + + @Override + public int insertOneTableMultiValuesUsingSuperTable(SubTableValue subTableValue) { + String sql = SqlSpeller.insertOneTableMultiValuesUsingSuperTable(subTableValue); + logger.info("SQL >>> " + sql); + + int affectRows = 0; + try { + affectRows = jdbcTemplate.update(sql); + } catch (Exception e) { + e.printStackTrace(); + } + return affectRows; + } + + @Override + public int insertMultiTableMultiValues(List tables) { + String sql = SqlSpeller.insertMultiSubTableMultiValues(tables); + logger.info("SQL >>> " + sql); + int affectRows = 0; + try { + affectRows = jdbcTemplate.update(sql); + } catch (Exception e) { + e.printStackTrace(); + } + return affectRows; + } + + @Override + public int insertMultiTableMultiValuesUsingSuperTable(List tables) { + String sql = SqlSpeller.insertMultiTableMultiValuesUsingSuperTable(tables); + logger.info("SQL >>> " + sql); + int affectRows = 0; + try { + affectRows = jdbcTemplate.update(sql); + } catch (Exception e) { + e.printStackTrace(); + } + return affectRows; + } +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SuperTableMapper.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SuperTableMapper.java similarity index 70% rename from tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SuperTableMapper.java rename to tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SuperTableMapper.java index c8610fac901efa907971d700bd9956cac264236f..9f8cec9e8fa5af8741d9efbdce72f240aa7a09aa 100644 --- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SuperTableMapper.java +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SuperTableMapper.java @@ -1,20 +1,16 @@ -package com.taosdata.taosdemo.mapper; +package com.taosdata.taosdemo.dao; import com.taosdata.taosdemo.domain.SuperTableMeta; -import org.apache.ibatis.annotations.Param; import org.springframework.stereotype.Repository; @Repository public interface SuperTableMapper { - // 创建超级表,使用自己定义的SQL语句 - int createSuperTableUsingSQL(@Param("createSuperTableSQL") String sql); - // 创建超级表 create table if not exists xxx.xxx (f1 type1, f2 type2, ... ) tags( t1 type1, t2 type2 ...) 
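
For context, a minimal usage sketch of the JdbcTemplate-based SubTableMapperImpl shown above. The DataSource wiring is an assumption (any javax.sql.DataSource pointed at a TDengine instance would do), the generic type parameters are assumed because the collapsed diff drops angle brackets, and the tag name/value are illustrative; the SubTableMeta setters and the two-argument TagValue constructor are the ones exercised by the mapper tests elsewhere in this diff.

    import com.taosdata.taosdemo.dao.SubTableMapper;
    import com.taosdata.taosdemo.dao.SubTableMapperImpl;
    import com.taosdata.taosdemo.domain.SubTableMeta;
    import com.taosdata.taosdemo.domain.TagValue;

    import javax.sql.DataSource;
    import java.util.ArrayList;
    import java.util.List;

    public class SubTableDaoSketch {
        // Creates one child table of the super table test.weather through the new DAO layer.
        public static void createOneSubTable(DataSource dataSource) {
            SubTableMapper mapper = new SubTableMapperImpl(dataSource);

            SubTableMeta meta = new SubTableMeta();
            meta.setDatabase("test");        // existing database (illustrative name)
            meta.setSupertable("weather");   // existing super table (illustrative name)
            meta.setName("t1");              // child table to create
            List<TagValue> tags = new ArrayList<>();
            tags.add(new TagValue("location", "beijing"));
            meta.setTags(tags);

            // Roughly executes: create table if not exists test.t1 using test.weather tags ('beijing')
            mapper.createUsingSuperTable(meta);
        }
    }
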
- int createSuperTable(SuperTableMeta tableMetadata); + void createSuperTable(SuperTableMeta tableMetadata); // 删除超级表 drop table if exists xxx; - int dropSuperTable(@Param("database") String database, @Param("name") String name); + void dropSuperTable(String database, String name); // diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SuperTableMapperImpl.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SuperTableMapperImpl.java new file mode 100644 index 0000000000000000000000000000000000000000..a293de5100d9af33bc88ad5f02ae632333a5b5b8 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SuperTableMapperImpl.java @@ -0,0 +1,31 @@ +package com.taosdata.taosdemo.dao; + +import com.taosdata.taosdemo.domain.SuperTableMeta; +import com.taosdata.taosdemo.utils.SqlSpeller; +import org.apache.log4j.Logger; +import org.springframework.jdbc.core.JdbcTemplate; + +import javax.sql.DataSource; + +public class SuperTableMapperImpl implements SuperTableMapper { + private static final Logger logger = Logger.getLogger(SuperTableMapperImpl.class); + private JdbcTemplate jdbcTemplate; + + public SuperTableMapperImpl(DataSource dataSource) { + this.jdbcTemplate = new JdbcTemplate(dataSource); + } + + @Override + public void createSuperTable(SuperTableMeta tableMetadata) { + String sql = SqlSpeller.createSuperTable(tableMetadata); + logger.info("SQL >>> " + sql); + jdbcTemplate.execute(sql); + } + + @Override + public void dropSuperTable(String database, String name) { + String sql = "drop table if exists " + database + "." + name; + logger.info("SQL >>> " + sql); + jdbcTemplate.execute(sql); + } +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/TableMapper.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/TableMapper.java similarity index 65% rename from tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/TableMapper.java rename to tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/TableMapper.java index f00f6c96941d47258d91a34ffeb1de99c4c8435e..32d1875e4d1a82f7dfb658d68ed0e63a5cbfa040 100644 --- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/TableMapper.java +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/TableMapper.java @@ -1,8 +1,7 @@ -package com.taosdata.taosdemo.mapper; +package com.taosdata.taosdemo.dao; import com.taosdata.taosdemo.domain.TableMeta; import com.taosdata.taosdemo.domain.TableValue; -import org.apache.ibatis.annotations.Param; import org.springframework.stereotype.Repository; import java.util.List; @@ -11,7 +10,7 @@ import java.util.List; public interface TableMapper { // 创建:普通表 - int create(TableMeta tableMeta); + void create(TableMeta tableMeta); // 插入:一张表多个value int insertOneTableMultiValues(TableValue values); @@ -20,9 +19,9 @@ public interface TableMapper { int insertOneTableMultiValuesWithColumns(TableValue values); // 插入:多个表多个value - int insertMultiTableMultiValues(@Param("tables") List tables); + int insertMultiTableMultiValues(List tables); // 插入:多个表多个value, 指定的列 - int insertMultiTableMultiValuesWithColumns(@Param("tables") List tables); + int insertMultiTableMultiValuesWithColumns(List tables); } \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/TableMapperImpl.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/TableMapperImpl.java new file mode 
100644 index 0000000000000000000000000000000000000000..77415619f02a1a0650448e9a8856fcb8d33e4921 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/TableMapperImpl.java @@ -0,0 +1,43 @@ +package com.taosdata.taosdemo.dao; + +import com.taosdata.taosdemo.dao.TableMapper; +import com.taosdata.taosdemo.domain.TableMeta; +import com.taosdata.taosdemo.domain.TableValue; +import com.taosdata.taosdemo.utils.SqlSpeller; +import org.springframework.jdbc.core.JdbcTemplate; + +import java.util.List; + +public class TableMapperImpl implements TableMapper { + private JdbcTemplate template; + + @Override + public void create(TableMeta tableMeta) { + String sql = SqlSpeller.createTable(tableMeta); + template.execute(sql); + } + + @Override + public int insertOneTableMultiValues(TableValue values) { + String sql = SqlSpeller.insertOneTableMultiValues(values); + return template.update(sql); + } + + @Override + public int insertOneTableMultiValuesWithColumns(TableValue values) { + String sql = SqlSpeller.insertOneTableMultiValuesWithColumns(values); + return template.update(sql); + } + + @Override + public int insertMultiTableMultiValues(List tables) { + String sql = SqlSpeller.insertMultiTableMultiValues(tables); + return template.update(sql); + } + + @Override + public int insertMultiTableMultiValuesWithColumns(List tables) { + String sql = SqlSpeller.insertMultiTableMultiValuesWithColumns(tables); + return template.update(sql); + } +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/RowValue.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/RowValue.java index a9f216f6792820931af4570450318ddc53e3c8b7..a444fa78dcdeb8f1bb76974a29051c98348a055b 100644 --- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/RowValue.java +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/RowValue.java @@ -8,7 +8,6 @@ import java.util.List; public class RowValue { private List fields; - public RowValue(List fields) { this.fields = fields; } diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/DatabaseMapper.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/DatabaseMapper.java deleted file mode 100644 index e535ed1f985d37688c8461e1f0bef3a803f541d4..0000000000000000000000000000000000000000 --- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/DatabaseMapper.java +++ /dev/null @@ -1,27 +0,0 @@ -package com.taosdata.taosdemo.mapper; - -import org.apache.ibatis.annotations.Param; -import org.springframework.stereotype.Repository; - -import java.util.Map; - -@Repository -public interface DatabaseMapper { - - // create database if not exists XXX - int createDatabase(@Param("database") String dbname); - - // drop database if exists XXX - int dropDatabase(@Param("database") String dbname); - - // create database if not exists XXX keep XX days XX replica XX - int createDatabaseWithParameters(Map map); - - // use XXX - int useDatabase(@Param("database") String dbname); - - //TODO: alter database - - //TODO: show database - -} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/DatabaseMapper.xml b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/DatabaseMapper.xml deleted file mode 100644 index 1a1de348428ae63ad16f9988f535f339f61d8c25..0000000000000000000000000000000000000000 --- 
a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/DatabaseMapper.xml +++ /dev/null @@ -1,48 +0,0 @@ - - - - - - - - create database if not exists ${database} - - - - DROP database if exists ${database} - - - - CREATE database if not exists ${database} - - KEEP ${keep} - - - DAYS ${days} - - - REPLICA ${replica} - - - cache ${cache} - - - blocks ${blocks} - - - minrows ${minrows} - - - maxrows ${maxrows} - - - - - use ${database} - - - - - - - \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SubTableMapper.xml b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SubTableMapper.xml deleted file mode 100644 index 2fb94e99b77623ab8731c03f398058dbdc459d07..0000000000000000000000000000000000000000 --- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SubTableMapper.xml +++ /dev/null @@ -1,81 +0,0 @@ - - - - - - - - CREATE table IF NOT EXISTS ${database}.${name} USING ${supertable} TAGS - - #{tag.value} - - - - - - INSERT INTO ${database}.${name} - VALUES - - - #{field.value} - - - - - - - INSERT INTO ${database}.${name} USING ${supertable} TAGS - - #{tag.value} - - VALUES - - - #{field.value} - - - - - - - - - - - INSERT INTO - - ${table.database}.${table.name} - VALUES - - - #{field.value} - - - - - - - - INSERT INTO - - ${table.database}.${table.name} USING ${table.supertable} TAGS - - #{tag.value} - - VALUES - - - #{field.value} - - - - - - - - - - - - - \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SuperTableMapper.xml b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SuperTableMapper.xml deleted file mode 100644 index 8b83d57a4bcbf7a2a6b3f69911b3accc61064480..0000000000000000000000000000000000000000 --- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SuperTableMapper.xml +++ /dev/null @@ -1,41 +0,0 @@ - - - - - - - ${createSuperTableSQL} - - - - - create table if not exists ${database}.${name} - - ${field.name} ${field.type} - - tags - - ${tag.name} ${tag.type} - - - - - - drop table if exists ${database}.${name} - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/TableMapper.xml b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/TableMapper.xml deleted file mode 100644 index e2e7cbb30def21f88ec7615b7a6dd4a769cb643a..0000000000000000000000000000000000000000 --- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/TableMapper.xml +++ /dev/null @@ -1,68 +0,0 @@ - - - - - - - - create table if not exists ${database}.${name} - - ${field.name} ${field.type} - - - - - - insert into ${database}.${name} values - - - ${field.value} - - - - - - - insert into ${database}.${name} - - ${column.name} - - values - - - ${field.value} - - - - - - - insert into - - ${table.database}.${table.name} values - - - ${field.value} - - - - - - - - insert into - - ${table.database}.${table.name} - - ${column.name} - - values - - - ${field.value} - - - - - - \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/DatabaseService.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/DatabaseService.java index e9aa2727a06ade0ff3d7661f84977c64f64ea9af..3c8e9624066bf629a74ebcaa3959a2e15338c363 100644 --- 
a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/DatabaseService.java +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/DatabaseService.java @@ -1,38 +1,42 @@ package com.taosdata.taosdemo.service; -import com.taosdata.taosdemo.mapper.DatabaseMapper; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Service; +import com.taosdata.taosdemo.dao.DatabaseMapper; +import com.taosdata.taosdemo.dao.DatabaseMapperImpl; +import javax.sql.DataSource; import java.util.Map; -@Service public class DatabaseService { - @Autowired - private DatabaseMapper databaseMapper; + private final DatabaseMapper databaseMapper; + + public DatabaseService(DataSource dataSource) { + this.databaseMapper = new DatabaseMapperImpl(dataSource); + } // 建库,指定 name - public int createDatabase(String database) { - return databaseMapper.createDatabase(database); + public void createDatabase(String database) { + databaseMapper.createDatabase(database); } // 建库,指定参数 keep,days,replica等 - public int createDatabase(Map map) { + public void createDatabase(Map map) { if (map.isEmpty()) - return 0; - if (map.containsKey("database") && map.size() == 1) - return databaseMapper.createDatabase(map.get("database")); - return databaseMapper.createDatabaseWithParameters(map); + return; + if (map.containsKey("database") && map.size() == 1) { + createDatabase(map.get("database")); + return; + } + databaseMapper.createDatabaseWithParameters(map); } // drop database - public int dropDatabase(String dbname) { - return databaseMapper.dropDatabase(dbname); + public void dropDatabase(String dbname) { + databaseMapper.dropDatabase(dbname); } // use database - public int useDatabase(String dbname) { - return databaseMapper.useDatabase(dbname); + public void useDatabase(String dbname) { + databaseMapper.useDatabase(dbname); } } diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SubTableService.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SubTableService.java index 07c315b65a9e07222fe85066c3f1f3b1edb107fe..cea98a1c5d350ed22ed5d26c72fedb212dcb7f26 100644 --- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SubTableService.java +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SubTableService.java @@ -1,68 +1,63 @@ package com.taosdata.taosdemo.service; +import com.taosdata.taosdemo.components.JdbcTaosdemoConfig; +import com.taosdata.taosdemo.dao.SubTableMapper; +import com.taosdata.taosdemo.dao.SubTableMapperImpl; import com.taosdata.taosdemo.domain.SubTableMeta; import com.taosdata.taosdemo.domain.SubTableValue; -import com.taosdata.taosdemo.mapper.SubTableMapper; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Service; +import com.taosdata.taosdemo.domain.SuperTableMeta; +import com.taosdata.taosdemo.service.data.SubTableMetaGenerator; +import com.taosdata.taosdemo.service.data.SubTableValueGenerator; +import org.apache.log4j.Logger; +import javax.sql.DataSource; import java.util.ArrayList; import java.util.List; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; +import java.util.concurrent.*; +import java.util.stream.Collectors; +import java.util.stream.IntStream; -@Service public class SubTableService extends AbstractService { - @Autowired private SubTableMapper mapper; + private static final 
Logger logger = Logger.getLogger(SubTableService.class); - /** - * 1. 选择database,找到所有supertable - * 2. 选择supertable,可以拿到表结构,包括field和tag - * 3. 指定子表的前缀和个数 - * 4. 指定创建子表的线程数 - */ - //TODO:指定database、supertable、子表前缀、子表个数、线程数 + public SubTableService(DataSource datasource) { + this.mapper = new SubTableMapperImpl(datasource); + } - // 多线程创建表,指定线程个数 - public int createSubTable(List subTables, int threadSize) { - ExecutorService executor = Executors.newFixedThreadPool(threadSize); - List> futureList = new ArrayList<>(); - for (SubTableMeta subTableMeta : subTables) { - Future future = executor.submit(() -> createSubTable(subTableMeta)); - futureList.add(future); + public void createSubTable(SuperTableMeta superTableMeta, long numOfTables, String prefixOfTable, int numOfThreadsForCreate) { + ExecutorService executor = Executors.newFixedThreadPool(numOfThreadsForCreate); + for (long i = 0; i < numOfTables; i++) { + long tableIndex = i; + executor.execute(() -> createSubTable(superTableMeta, prefixOfTable + (tableIndex + 1))); } executor.shutdown(); - return getAffectRows(futureList); + try { + executor.awaitTermination(Long.MAX_VALUE,TimeUnit.NANOSECONDS); + } catch (InterruptedException e) { + e.printStackTrace(); + } } - - // 创建一张子表,可以指定database,supertable,tablename,tag值 - public int createSubTable(SubTableMeta subTableMeta) { - return mapper.createUsingSuperTable(subTableMeta); + public void createSubTable(SuperTableMeta superTableMeta, String tableName) { + // 构造数据 + SubTableMeta meta = SubTableMetaGenerator.generate(superTableMeta, tableName); + createSubTable(meta); } - // 单线程创建多张子表,每张子表分别可以指定自己的database,supertable,tablename,tag值 - public int createSubTable(List subTables) { - return createSubTable(subTables, 1); + // 创建一张子表,可以指定database,supertable,tablename,tag值 + public void createSubTable(SubTableMeta subTableMeta) { + mapper.createUsingSuperTable(subTableMeta); } /*************************************************************************************************************************/ // 插入:多线程,多表 - public int insert(List subTableValues, int threadSize) { + public int insert(List subTableValues, int threadSize, int frequency) { ExecutorService executor = Executors.newFixedThreadPool(threadSize); Future future = executor.submit(() -> insert(subTableValues)); executor.shutdown(); - return getAffectRows(future); - } - - // 插入:多线程,多表, 自动建表 - public int insertAutoCreateTable(List subTableValues, int threadSize) { - ExecutorService executor = Executors.newFixedThreadPool(threadSize); - Future future = executor.submit(() -> insertAutoCreateTable(subTableValues)); - executor.shutdown(); + //TODO:frequency return getAffectRows(future); } @@ -73,7 +68,7 @@ public class SubTableService extends AbstractService { // 插入: 多表,insert into xxx values(),()... xxx values(),()... public int insert(List subTableValues) { - return mapper.insertMultiTableMultiValuesUsingSuperTable(subTableValues); + return mapper.insertMultiTableMultiValues(subTableValues); } // 插入:单表,自动建表, insert into xxx using xxx tags(...) values(),()... @@ -86,33 +81,128 @@ public class SubTableService extends AbstractService { return mapper.insertMultiTableMultiValuesUsingSuperTable(subTableValues); } + public int insertMultiThreads(SuperTableMeta superTableMeta, int threadSize, long tableSize, long startTime, long gap, JdbcTaosdemoConfig config) { + List taskList = new ArrayList<>(); + List threads = IntStream.range(0, threadSize) + .mapToObj(i -> { + long startInd = i * gap; + long endInd = (i + 1) * gap < tableSize ? 
(i + 1) * gap : tableSize; + FutureTask task = new FutureTask<>( + new InsertTask(superTableMeta, + startInd, endInd, + startTime, config.timeGap, + config.numOfRowsPerTable, config.numOfTablesPerSQL, config.numOfValuesPerSQL, + config.order, config.rate, config.range, + config.prefixOfTable, config.autoCreateTable) + ); + taskList.add(task); + return new Thread(task, "InsertThread-" + i); + }).collect(Collectors.toList()); + + threads.stream().forEach(Thread::start); + for (Thread thread : threads) { + try { + thread.join(); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } + + int affectedRows = 0; + for (FutureTask task : taskList) { + try { + affectedRows += task.get(); + } catch (InterruptedException e) { + e.printStackTrace(); + } catch (ExecutionException e) { + e.printStackTrace(); + } + } + + return affectedRows; + } -// ExecutorService executors = Executors.newFixedThreadPool(threadSize); -// int count = 0; -// -// // -// List subTableValues = new ArrayList<>(); -// for (int tableIndex = 1; tableIndex <= numOfTablesPerSQL; tableIndex++) { -// // each table -// SubTableValue subTableValue = new SubTableValue(); -// subTableValue.setDatabase(); -// subTableValue.setName(); -// subTableValue.setSupertable(); -// -// List values = new ArrayList<>(); -// for (int valueCnt = 0; valueCnt < numOfValuesPerSQL; valueCnt++) { -// List fields = new ArrayList<>(); -// for (int fieldInd = 0; fieldInd <; fieldInd++) { -// FieldValue field = new FieldValue<>("", ""); -// fields.add(field); -// } -// RowValue row = new RowValue(); -// row.setFields(fields); -// values.add(row); -// } -// subTableValue.setValues(values); -// subTableValues.add(subTableValue); -// } + private class InsertTask implements Callable { + + private final long startTableInd; // included + private final long endTableInd; // excluded + private final long startTime; + private final long timeGap; + private final long numOfRowsPerTable; + private long numOfTablesPerSQL; + private long numOfValuesPerSQL; + private final SuperTableMeta superTableMeta; + private final int order; + private final int rate; + private final long range; + private final String prefixOfTable; + private final boolean autoCreateTable; + + public InsertTask(SuperTableMeta superTableMeta, long startTableInd, long endTableInd, + long startTime, long timeGap, + long numOfRowsPerTable, long numOfTablesPerSQL, long numOfValuesPerSQL, + int order, int rate, long range, + String prefixOfTable, boolean autoCreateTable) { + this.superTableMeta = superTableMeta; + this.startTableInd = startTableInd; + this.endTableInd = endTableInd; + this.startTime = startTime; + this.timeGap = timeGap; + this.numOfRowsPerTable = numOfRowsPerTable; + this.numOfTablesPerSQL = numOfTablesPerSQL; + this.numOfValuesPerSQL = numOfValuesPerSQL; + this.order = order; + this.rate = rate; + this.range = range; + this.prefixOfTable = prefixOfTable; + this.autoCreateTable = autoCreateTable; + } + + + @Override + public Integer call() { + + long numOfTables = endTableInd - startTableInd; + if (numOfRowsPerTable < numOfValuesPerSQL) + numOfValuesPerSQL = (int) numOfRowsPerTable; + if (numOfTables < numOfTablesPerSQL) + numOfTablesPerSQL = (int) numOfTables; + + int affectRows = 0; + // row + for (long rowCnt = 0; rowCnt < numOfRowsPerTable; ) { + long rowSize = numOfValuesPerSQL; + if (rowCnt + rowSize > numOfRowsPerTable) { + rowSize = numOfRowsPerTable - rowCnt; + } + //table + for (long tableCnt = startTableInd; tableCnt < endTableInd; ) { + long tableSize = 
numOfTablesPerSQL; + if (tableCnt + tableSize > endTableInd) { + tableSize = endTableInd - tableCnt; + } + long startTime = this.startTime + rowCnt * timeGap; +// System.out.println(Thread.currentThread().getName() + " >>> " + "rowCnt: " + rowCnt + ", rowSize: " + rowSize + ", " + "tableCnt: " + tableCnt + ",tableSize: " + tableSize + ", " + "startTime: " + startTime + ",timeGap: " + timeGap + ""); + /***********************************************/ + // 生成数据 + List data = SubTableValueGenerator.generate(superTableMeta, prefixOfTable, tableCnt, tableSize, rowSize, startTime, timeGap); + // 乱序 + if (order != 0) + SubTableValueGenerator.disrupt(data, rate, range); + // insert + if (autoCreateTable) + affectRows += insertAutoCreateTable(data); + else + affectRows += insert(data); + /***********************************************/ + tableCnt += tableSize; + } + rowCnt += rowSize; + } + + return affectRows; + } + } } diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SuperTableService.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SuperTableService.java index 7f6836c999ba0eb2e6eaab94e9e026f3856d8faf..b91348e2d008bc1ac32faffc5912a8509adf42bd 100644 --- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SuperTableService.java +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SuperTableService.java @@ -1,19 +1,22 @@ package com.taosdata.taosdemo.service; +import com.taosdata.taosdemo.dao.SuperTableMapper; +import com.taosdata.taosdemo.dao.SuperTableMapperImpl; import com.taosdata.taosdemo.domain.SuperTableMeta; -import com.taosdata.taosdemo.mapper.SuperTableMapper; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Service; -@Service +import javax.sql.DataSource; + public class SuperTableService { - @Autowired private SuperTableMapper superTableMapper; + public SuperTableService(DataSource dataSource) { + this.superTableMapper = new SuperTableMapperImpl(dataSource); + } + // 创建超级表,指定每个field的名称和类型,每个tag的名称和类型 - public int create(SuperTableMeta superTableMeta) { - return superTableMapper.createSuperTable(superTableMeta); + public void create(SuperTableMeta superTableMeta) { + superTableMapper.createSuperTable(superTableMeta); } public void drop(String database, String name) { diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/TableService.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/TableService.java index bada6de708e44e04237a7e6a3b734eb46974e21f..b4ad2d17e58a3f7c04665707f0cd3e7327d7c16c 100644 --- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/TableService.java +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/TableService.java @@ -1,41 +1,25 @@ package com.taosdata.taosdemo.service; +import com.taosdata.taosdemo.dao.TableMapper; import com.taosdata.taosdemo.domain.TableMeta; -import com.taosdata.taosdemo.mapper.TableMapper; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; -import java.util.ArrayList; import java.util.List; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; @Service public class TableService extends AbstractService { - @Autowired private TableMapper tableMapper; //创建一张表 - public int create(TableMeta tableMeta) { - return tableMapper.create(tableMeta); + 
public void create(TableMeta tableMeta) { + tableMapper.create(tableMeta); } //创建多张表 - public int create(List tables) { - return create(tables, 1); - } - - //多线程创建多张表 - public int create(List tables, int threadSize) { - ExecutorService executors = Executors.newFixedThreadPool(threadSize); - List> futures = new ArrayList<>(); - for (TableMeta table : tables) { - Future future = executors.submit(() -> create(table)); - futures.add(future); - } - return getAffectRows(futures); + public void create(List tables) { + tables.stream().forEach(this::create); } diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/SubTableMetaGenerator.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/SubTableMetaGenerator.java index d15ad0d8bdd066a9dea50251094657d20fdc4fd7..88e3c0d26a5a7558c1c07f0fc38ae21710438dbe 100644 --- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/SubTableMetaGenerator.java +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/SubTableMetaGenerator.java @@ -27,4 +27,16 @@ public class SubTableMetaGenerator { return subTableMetaList; } + public static SubTableMeta generate(SuperTableMeta superTableMeta, String tableName) { + SubTableMeta subTableMeta = new SubTableMeta(); + // create table xxx.xxx using xxx tags(...) + subTableMeta.setDatabase(superTableMeta.getDatabase()); + subTableMeta.setName(tableName); + subTableMeta.setSupertable(superTableMeta.getName()); + subTableMeta.setFields(superTableMeta.getFields()); + List tagValues = TagValueGenerator.generate(superTableMeta.getTags()); + subTableMeta.setTags(tagValues); + return subTableMeta; + } + } diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/SubTableValueGenerator.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/SubTableValueGenerator.java index a36f718f83e20e2659322f0f23d6375ebfa3af0d..8c318dbd3abf2ddfec8b51e83f32246421c49d51 100644 --- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/SubTableValueGenerator.java +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/SubTableValueGenerator.java @@ -1,8 +1,6 @@ package com.taosdata.taosdemo.service.data; -import com.taosdata.taosdemo.domain.RowValue; -import com.taosdata.taosdemo.domain.SubTableMeta; -import com.taosdata.taosdemo.domain.SubTableValue; +import com.taosdata.taosdemo.domain.*; import com.taosdata.taosdemo.utils.TimeStampUtil; import org.springframework.beans.BeanUtils; @@ -11,22 +9,26 @@ import java.util.List; public class SubTableValueGenerator { - public static List generate(List subTableMetaList, int numOfRowsPerTable, long start, long timeGap) { - List subTableValueList = new ArrayList<>(); - - subTableMetaList.stream().forEach((subTableMeta) -> { - // insert into xxx.xxx using xxxx tags(...) values(),()... 
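
As a usage sketch of the SubTableMetaGenerator.generate(SuperTableMeta, String) overload added above: a SuperTableMeta is assembled by hand and a single child-table meta is derived from it. The two-argument FieldMeta/TagMeta constructors are the ones used in the (removed) mapper tests later in this diff; generic type parameters and the concrete names are assumptions for illustration.

    import com.taosdata.taosdemo.domain.FieldMeta;
    import com.taosdata.taosdemo.domain.SubTableMeta;
    import com.taosdata.taosdemo.domain.SuperTableMeta;
    import com.taosdata.taosdemo.domain.TagMeta;
    import com.taosdata.taosdemo.service.data.SubTableMetaGenerator;

    import java.util.ArrayList;
    import java.util.List;

    public class SubTableMetaSketch {
        public static SubTableMeta oneChildTable() {
            SuperTableMeta superTable = new SuperTableMeta();
            superTable.setDatabase("test");
            superTable.setName("weather");

            List<FieldMeta> fields = new ArrayList<>();
            fields.add(new FieldMeta("ts", "timestamp"));
            fields.add(new FieldMeta("temperature", "float"));
            superTable.setFields(fields);

            List<TagMeta> tags = new ArrayList<>();
            tags.add(new TagMeta("location", "nchar(64)"));
            superTable.setTags(tags);

            // Copies database, super-table name and fields from the super table,
            // generates tag values, and names the child table "t_1".
            return SubTableMetaGenerator.generate(superTable, "t_1");
        }
    }
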
+ public static List generate(SuperTableMeta superTableMeta, String prefixOfTables, long tableIndex, long tableSize, long valueSize, long startTime, long timeGap) { + List subTableValues = new ArrayList<>(); + for (int i = 1; i <= tableSize; i++) { SubTableValue subTableValue = new SubTableValue(); - subTableValue.setDatabase(subTableMeta.getDatabase()); - subTableValue.setName(subTableMeta.getName()); - subTableValue.setSupertable(subTableMeta.getSupertable()); - subTableValue.setTags(subTableMeta.getTags()); - TimeStampUtil.TimeTuple tuple = TimeStampUtil.range(start, timeGap, numOfRowsPerTable); - List values = FieldValueGenerator.generate(tuple.start, tuple.end, tuple.timeGap, subTableMeta.getFields()); + subTableValue.setDatabase(superTableMeta.getDatabase()); + subTableValue.setName(prefixOfTables + (tableIndex + i)); + subTableValue.setSupertable(superTableMeta.getName()); + TimeStampUtil.TimeTuple tuple = TimeStampUtil.range(startTime, timeGap, valueSize); + List tags = TagValueGenerator.generate(superTableMeta.getTags()); + subTableValue.setTags(tags); + List values = FieldValueGenerator.generate(tuple.start, tuple.end, tuple.timeGap, superTableMeta.getFields()); subTableValue.setValues(values); - subTableValueList.add(subTableValue); - }); - return subTableValueList; + + subTableValues.add(subTableValue); + } + return subTableValues; + } + + public static List generate(List subTableMetaList, int numOfRowsPerTable, long start, long timeGap) { + return generate(subTableMetaList, 0, subTableMetaList.size(), numOfRowsPerTable, start, timeGap); } public static void disrupt(List subTableValueList, int rate, long range) { @@ -38,12 +40,10 @@ public class SubTableValueGenerator { public static List> split(List subTableValueList, int numOfTables, int numOfTablesPerSQL, int numOfRowsPerTable, int numOfValuesPerSQL) { List> dataList = new ArrayList<>(); - if (numOfRowsPerTable < numOfValuesPerSQL) numOfValuesPerSQL = numOfRowsPerTable; if (numOfTables < numOfTablesPerSQL) numOfTablesPerSQL = numOfTables; - //table for (int tableCnt = 0; tableCnt < numOfTables; ) { int tableSize = numOfTablesPerSQL; @@ -81,4 +81,20 @@ public class SubTableValueGenerator { split(null, 99, 10, 99, 10); } + public static List generate(List subTableMetaList, int tableCnt, int tableSize, int rowSize, long startTime, long timeGap) { + List subTableValueList = new ArrayList<>(); + for (int i = 0; i < tableSize; i++) { + SubTableMeta subTableMeta = subTableMetaList.get(tableCnt + i); + SubTableValue subTableValue = new SubTableValue(); + subTableValue.setDatabase(subTableMeta.getDatabase()); + subTableValue.setName(subTableMeta.getName()); + subTableValue.setSupertable(subTableMeta.getSupertable()); + subTableValue.setTags(subTableMeta.getTags()); + TimeStampUtil.TimeTuple tuple = TimeStampUtil.range(startTime, timeGap, rowSize); + List values = FieldValueGenerator.generate(tuple.start, tuple.end, tuple.timeGap, subTableMeta.getFields()); + subTableValue.setValues(values); + subTableValueList.add(subTableValue); + } + return subTableValueList; + } } diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/SqlSpeller.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/SqlSpeller.java new file mode 100644 index 0000000000000000000000000000000000000000..a60f0641d3a4441195c3a60639fbe3a197115dc3 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/SqlSpeller.java @@ -0,0 +1,194 @@ +package com.taosdata.taosdemo.utils; + +import 
com.taosdata.taosdemo.domain.*; + +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +public class SqlSpeller { + + // create database if not exists xxx keep xx days xx replica xx cache xx... + public static String createDatabase(Map map) { + StringBuilder sb = new StringBuilder(); + sb.append("create database if not exists ").append(map.get("database")).append(" "); + if (map.containsKey("keep")) + sb.append("keep ").append(map.get("keep")).append(" "); + if (map.containsKey("days")) + sb.append("days ").append(map.get("days")).append(" "); + if (map.containsKey("replica")) + sb.append("replica ").append(map.get("replica")).append(" "); + if (map.containsKey("cache")) + sb.append("cache ").append(map.get("cache")).append(" "); + if (map.containsKey("blocks")) + sb.append("blocks ").append(map.get("blocks")).append(" "); + if (map.containsKey("minrows")) + sb.append("minrows ").append(map.get("minrows")).append(" "); + if (map.containsKey("maxrows")) + sb.append("maxrows ").append(map.get("maxrows")).append(" "); + if (map.containsKey("precision")) + sb.append("precision ").append(map.get("precision")).append(" "); + if (map.containsKey("comp")) + sb.append("comp ").append(map.get("comp")).append(" "); + if (map.containsKey("walLevel")) + sb.append("walLevel ").append(map.get("walLevel")).append(" "); + if (map.containsKey("quorum")) + sb.append("quorum ").append(map.get("quorum")).append(" "); + if (map.containsKey("fsync")) + sb.append("fsync ").append(map.get("fsync")).append(" "); + if (map.containsKey("update")) + sb.append("update ").append(map.get("update")).append(" "); + return sb.toString(); + } + + // create table if not exists xx.xx using xx.xx tags(x,x,x) + public static String createTableUsingSuperTable(SubTableMeta subTableMeta) { + StringBuilder sb = new StringBuilder(); + sb.append("create table if not exists ").append(subTableMeta.getDatabase()).append(".").append(subTableMeta.getName()).append(" "); + sb.append("using ").append(subTableMeta.getDatabase()).append(".").append(subTableMeta.getSupertable()).append(" "); +// String tagStr = subTableMeta.getTags().stream().filter(Objects::nonNull) +// .map(tagValue -> tagValue.getName() + " '" + tagValue.getValue() + "' ") +// .collect(Collectors.joining(",", "(", ")")); + sb.append("tags ").append(tagValues(subTableMeta.getTags())); + return sb.toString(); + } + + // insert into xx.xxx values(x,x,x),(x,x,x)... 
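
A small sketch of what the createDatabase speller above produces for a typical parameter map. The map keys mirror the ones handled by the method; the map's generic type and the printed SQL are assumptions shown only for illustration.

    import com.taosdata.taosdemo.utils.SqlSpeller;

    import java.util.HashMap;
    import java.util.Map;

    public class CreateDatabaseSqlSketch {
        public static void main(String[] args) {
            // Generic type <String, String> is an assumption; the diff shows a raw Map.
            Map<String, String> params = new HashMap<>();
            params.put("database", "db_test");
            params.put("keep", "3650");
            params.put("days", "30");
            params.put("replica", "1");

            // Roughly: "create database if not exists db_test keep 3650 days 30 replica 1 "
            String sql = SqlSpeller.createDatabase(params);
            System.out.println(sql);
        }
    }
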
+ public static String insertOneTableMultiValues(SubTableValue subTableValue) { + StringBuilder sb = new StringBuilder(); + sb.append("insert into ").append(subTableValue.getDatabase()).append(".").append(subTableValue.getName() + " "); + sb.append("values").append(rowValues(subTableValue.getValues())); + return sb.toString(); + } + + //f1, f2, f3 + private static String fieldValues(List fields) { + return IntStream.range(0, fields.size()).mapToObj(i -> { + if (i == 0) { + return "" + fields.get(i).getValue() + ""; + } else { + return "'" + fields.get(i).getValue() + "'"; + } + }).collect(Collectors.joining(",", "(", ")")); + +// return fields.stream() +// .filter(Objects::nonNull) +// .map(fieldValue -> "'" + fieldValue.getValue() + "'") +// .collect(Collectors.joining(",", "(", ")")); + } + + //(f1, f2, f3),(f1, f2, f3) + private static String rowValues(List rowValues) { + return rowValues.stream().filter(Objects::nonNull) + .map(rowValue -> fieldValues(rowValue.getFields())) + .collect(Collectors.joining(",", "", "")); + } + + // insert into xx.xxx using xx.xx tags(x,x,x) values(x,x,x),(x,x,x)... + public static String insertOneTableMultiValuesUsingSuperTable(SubTableValue subTableValue) { + StringBuilder sb = new StringBuilder(); + sb.append("insert into ").append(subTableValue.getDatabase()).append(".").append(subTableValue.getName()).append(" "); + sb.append("using ").append(subTableValue.getDatabase()).append(".").append(subTableValue.getSupertable()).append(" "); + sb.append("tags ").append(tagValues(subTableValue.getTags()) + " "); + sb.append("values ").append(rowValues(subTableValue.getValues())); + return sb.toString(); + } + + // (t1,t2,t3...) + private static String tagValues(List tags) { + return tags.stream().filter(Objects::nonNull) + .map(tagValue -> "'" + tagValue.getValue() + "'") + .collect(Collectors.joining(",", "(", ")")); + } + + // insert into xx.xx values(),(),()... xx.xx values(),()... + public static String insertMultiSubTableMultiValues(List tables) { + return "insert into " + tables.stream().filter(Objects::nonNull) + .map(table -> table.getDatabase() + "." + table.getName() + " values " + rowValues(table.getValues())) + .collect(Collectors.joining(" ", "", "")); + } + + // insert into xx.xx using xx.xx tags(xx,xx) values(),()... + public static String insertMultiTableMultiValuesUsingSuperTable(List tables) { + return "insert into " + tables.stream().filter(Objects::nonNull) + .map(table -> { + StringBuilder sb = new StringBuilder(); + sb.append(table.getDatabase()).append(".").append(table.getName()); + sb.append(" using ").append(table.getDatabase()).append(".").append(table.getSupertable()); + sb.append(" tags ").append(tagValues(table.getTags())); + sb.append(" values ").append(rowValues(table.getValues())); + return sb.toString(); + }).collect(Collectors.joining(" ")); + } + + // create table if not exists xx.xx (f1 xx,f2 xx...) tags(t1 xx, t2 xx...) 
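
And a sketch of the auto-create insert speller above, with values assembled the same way the SubTableMapper test later in this diff builds them. Note that fieldValues() renders the first column (the timestamp) unquoted and quotes the remaining columns. Table, tag, and field names here are illustrative assumptions.

    import com.taosdata.taosdemo.domain.FieldValue;
    import com.taosdata.taosdemo.domain.RowValue;
    import com.taosdata.taosdemo.domain.SubTableValue;
    import com.taosdata.taosdemo.domain.TagValue;
    import com.taosdata.taosdemo.utils.SqlSpeller;

    import java.util.ArrayList;
    import java.util.List;

    public class InsertSqlSketch {
        public static void main(String[] args) {
            SubTableValue table = new SubTableValue();
            table.setDatabase("test");
            table.setSupertable("weather");
            table.setName("t1");

            List<TagValue> tags = new ArrayList<>();
            tags.add(new TagValue("location", "beijing"));
            table.setTags(tags);

            List<RowValue> rows = new ArrayList<>();
            List<FieldValue> fields = new ArrayList<>();
            fields.add(new FieldValue<>("ts", System.currentTimeMillis())); // first column: rendered unquoted
            fields.add(new FieldValue<>("temperature", 20));                // later columns: rendered quoted
            rows.add(new RowValue(fields));
            table.setValues(rows);

            // Roughly: insert into test.t1 using test.weather tags ('beijing') values (<ts>,'20')
            System.out.println(SqlSpeller.insertOneTableMultiValuesUsingSuperTable(table));
        }
    }
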
+ public static String createSuperTable(SuperTableMeta tableMetadata) { + StringBuilder sb = new StringBuilder(); + sb.append("create table if not exists ").append(tableMetadata.getDatabase()).append(".").append(tableMetadata.getName()); + String fields = tableMetadata.getFields().stream() + .filter(Objects::nonNull).map(field -> field.getName() + " " + field.getType() + " ") + .collect(Collectors.joining(",", "(", ")")); + sb.append(fields); + sb.append(" tags "); + String tags = tableMetadata.getTags().stream().filter(Objects::nonNull) + .map(tag -> tag.getName() + " " + tag.getType() + " ") + .collect(Collectors.joining(",", "(", ")")); + sb.append(tags); + return sb.toString(); + } + + + public static String createTable(TableMeta tableMeta) { + StringBuilder sb = new StringBuilder(); + sb.append("create table if not exists ").append(tableMeta.getDatabase()).append(".").append(tableMeta.getName()).append(" "); + String fields = tableMeta.getFields().stream() + .filter(Objects::nonNull).map(field -> field.getName() + " " + field.getType() + " ") + .collect(Collectors.joining(",", "(", ")")); + sb.append(fields); + return sb.toString(); + } + + // insert into xx.xx values() + public static String insertOneTableMultiValues(TableValue table) { + StringBuilder sb = new StringBuilder(); + sb.append("insert into ").append(table.getDatabase()).append(".").append(table.getName() + " "); + sb.append("values").append(rowValues(table.getValues())); + return sb.toString(); + + } + + // insert into xx.xx (f1, f2, f3...) values(xx,xx,xx),(xx,xx,xx)... + public static String insertOneTableMultiValuesWithColumns(TableValue table) { + StringBuilder sb = new StringBuilder(); + sb.append("insert into ").append(table.getDatabase()).append(".").append(table.getName()).append(" "); + sb.append(columnNames(table.getColumns())); + sb.append(" values ").append(rowValues(table.getValues())); + return sb.toString(); + } + + // (f1, f2, f3...) + private static String columnNames(List fields) { + return fields.stream() + .filter(Objects::nonNull) + .map(column -> column.getName() + " ") + .collect(Collectors.joining(",", "(", ")")); + } + + public static String insertMultiTableMultiValuesWithColumns(List tables) { + StringBuilder sb = new StringBuilder(); + sb.append("insert into ").append(tables.stream().filter(Objects::nonNull) + .map(table -> table.getDatabase() + "." + table.getName() + " " + columnNames(table.getColumns()) + " values " + rowValues(table.getValues())) + .collect(Collectors.joining(" "))); + return sb.toString(); + } + + public static String insertMultiTableMultiValues(List tables) { + StringBuilder sb = new StringBuilder(); + sb.append("insert into ").append(tables.stream().filter(Objects::nonNull).map(table -> + table.getDatabase() + "." 
+ table.getName() + " values " + rowValues(table.getValues()) + ).collect(Collectors.joining(" "))); + return sb.toString(); + } +} diff --git a/tests/examples/JDBC/taosdemo/src/main/resources/application.properties b/tests/examples/JDBC/taosdemo/src/main/resources/application.properties index 1e7a7de89fa495b8898a58805bd9e256db1d69ad..6fd38f1762e6879422cf26fe2dc617caeb5e297c 100644 --- a/tests/examples/JDBC/taosdemo/src/main/resources/application.properties +++ b/tests/examples/JDBC/taosdemo/src/main/resources/application.properties @@ -1,14 +1,5 @@ -#spring.datasource.url=jdbc:mysql://master:3306/?useSSL=false&useUnicode=true&characterEncoding=UTF-8 -#spring.datasource.driver-class-name=com.mysql.jdbc.Driver -#spring.datasource.username=root -#spring.datasource.password=123456 - -spring.datasource.url=jdbc:TAOS://master:6030/?charset=UTF-8&locale=en_US.UTF-8&timezone=UTC-8 -spring.datasource.driver-class-name=com.taosdata.jdbc.TSDBDriver -spring.datasource.username=root -spring.datasource.password=taosdata - -spring.datasource.hikari.maximum-pool-size=10 -spring.datasource.hikari.minimum-idle=10 -spring.datasource.hikari.max-lifetime=600000 -logging.level.com.taosdata.taosdemo.mapper=debug \ No newline at end of file +jdbc.driver=com.taosdata.jdbc.rs.RestfulDriver +#jdbc.driver=com.taosdata.jdbc.TSDBDriver +hikari.maximum-pool-size=1 +hikari.minimum-idle=1 +hikari.max-lifetime=0 \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/main/resources/insert.json b/tests/examples/JDBC/taosdemo/src/main/resources/insert.json new file mode 100644 index 0000000000000000000000000000000000000000..a7bd87e6d3bbf9f6ec1b0a68d31c4da6c620c994 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/resources/insert.json @@ -0,0 +1,119 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 2, + "databases": [ + { + "dbinfo": { + "name": "db04", + "drop": "no", + "replica": 1, + "days": 2, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp": 2, + "walLevel": 1, + "quorum": 1, + "fsync": 3000, + "update": 0 + }, + "super_tables": [ + { + "name": "stb04", + "child_table_exists": "no", + "childtable_count": 10, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "data_source": "rand", + "insert_mode": "taosc", + "insert_rate": 0, + "insert_rows": 100, + "multi_thread_write_one_tbl": "no", + "number_of_tbl_in_one_sql": 0, + "rows_per_tbl": 3, + "max_sql_len": 1024, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [ + { + "type": "TINYINT" + }, + { + "type": "SMALLINT" + }, + { + "type": "INT" + }, + { + "type": "BIGINT" + }, + { + "type": "BOOL" + }, + { + "type": "FLOAT" + }, + { + "type": "DOUBLE" + }, + { + "type": "TIMESTAMP" + }, + { + "type": "BINARY", + "len": 16 + }, + { + "type": "NCHAR", + "len": 4 + } + ], + "tags": [ + { + "type": "TINYINT" + }, + { + "type": "SMALLINT" + }, + { + "type": "INT" + }, + { + "type": "BIGINT" + }, + { + "type": "BOOL" + }, + { + "type": "FLOAT" + }, + { + "type": "DOUBLE" + }, + { + "type": "BINARY", + "len": 16 + }, + { + "type": "NCHAR", + "len": 4 + } + ] + } + ] + } + ] +} diff --git a/tests/examples/JDBC/taosdemo/src/main/resources/lib/taos-jdbcdriver-2.0.15-dist.jar 
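
For completeness, one way the jdbc.driver and hikari.* keys in the trimmed application.properties could be turned into a DataSource for the DAO layer above. This is a sketch under assumptions: HikariCP on the classpath, the native TSDBDriver variant (the commented-out alternative in the file), and the URL/credentials taken from the spring.datasource.* entries that were removed; the application's actual bootstrap code is not part of this diff.

    import com.zaxxer.hikari.HikariConfig;
    import com.zaxxer.hikari.HikariDataSource;

    import javax.sql.DataSource;

    public class DataSourceSketch {
        public static DataSource build() {
            HikariConfig config = new HikariConfig();
            // jdbc.driver (here the commented-out native-driver option from application.properties)
            config.setDriverClassName("com.taosdata.jdbc.TSDBDriver");
            // URL and credentials as they appeared in the removed spring.datasource.* entries
            config.setJdbcUrl("jdbc:TAOS://master:6030/?charset=UTF-8&locale=en_US.UTF-8&timezone=UTC-8");
            config.setUsername("root");
            config.setPassword("taosdata");
            // hikari.* values from the new properties file
            config.setMaximumPoolSize(1);
            config.setMinimumIdle(1);
            config.setMaxLifetime(0); // 0 = no maximum lifetime in HikariCP
            return new HikariDataSource(config);
        }
    }
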
b/tests/examples/JDBC/taosdemo/src/main/resources/lib/taos-jdbcdriver-2.0.15-dist.jar new file mode 100644 index 0000000000000000000000000000000000000000..58508b92e5afec7e400691d213940536e8abe5f6 Binary files /dev/null and b/tests/examples/JDBC/taosdemo/src/main/resources/lib/taos-jdbcdriver-2.0.15-dist.jar differ diff --git a/tests/examples/JDBC/taosdemo/src/main/resources/log4j.properties b/tests/examples/JDBC/taosdemo/src/main/resources/log4j.properties index 1299357be3d2e99ca6b79227f14ca7a587718914..b2a9586ea78e2d55a0091097097988b038267405 100644 --- a/tests/examples/JDBC/taosdemo/src/main/resources/log4j.properties +++ b/tests/examples/JDBC/taosdemo/src/main/resources/log4j.properties @@ -1,5 +1,5 @@ ### 设置### -log4j.rootLogger=debug,stdout,DebugLog,ErrorLog +log4j.rootLogger=error,stdout ### 输出信息到控制抬 ### log4j.appender.stdout=org.apache.log4j.ConsoleAppender log4j.appender.stdout.Target=System.out diff --git a/tests/examples/JDBC/taosdemo/src/main/resources/query.json b/tests/examples/JDBC/taosdemo/src/main/resources/query.json new file mode 100644 index 0000000000000000000000000000000000000000..53d0b319212196257aa3e84be1221bd6e2bd0d8d --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/resources/query.json @@ -0,0 +1,17 @@ +{ + "filetype":"query", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "databases": "db01", + "super_table_query": + {"rate":1, "concurrent":1, + "sqls": [{"sql": "select count(*) from stb01", "result": "./query_res0.txt"}] + }, + "sub_table_query": + {"stblname": "stb01", "rate":1, "threads":1, + "sqls": [{"sql": "select count(*) from xxxx", "result": "./query_res1.txt"}] + } +} diff --git a/tests/examples/JDBC/taosdemo/src/main/resources/templates/index.html b/tests/examples/JDBC/taosdemo/src/main/resources/templates/index.html index 69f8851c9b83eaf2768331ea713c212fe500992d..953a058032612c7eac8eacc68f738d5fef03ec39 100644 --- a/tests/examples/JDBC/taosdemo/src/main/resources/templates/index.html +++ b/tests/examples/JDBC/taosdemo/src/main/resources/templates/index.html @@ -5,6 +5,6 @@ Index -

-    Hello~~~
+    Developing~~~
\ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/TaosdemoApplicationTests.java b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/TaosdemoApplicationTests.java deleted file mode 100644 index e8725091873c2dad5cb9e4a8c2735bf7418f4ef5..0000000000000000000000000000000000000000 --- a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/TaosdemoApplicationTests.java +++ /dev/null @@ -1,13 +0,0 @@ -package com.taosdata.taosdemo; - -import org.junit.jupiter.api.Test; -import org.springframework.boot.test.context.SpringBootTest; - -@SpringBootTest -class TaosdemoApplicationTests { - - @Test - void contextLoads() { - } - -} diff --git a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/DatabaseMapperTest.java b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/DatabaseMapperTest.java deleted file mode 100644 index 8364e16ed06b8d9a4ae74cd370281c19149989d2..0000000000000000000000000000000000000000 --- a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/DatabaseMapperTest.java +++ /dev/null @@ -1,42 +0,0 @@ -package com.taosdata.taosdemo.mapper; - -import org.junit.Test; -import org.junit.runner.RunWith; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.boot.test.context.SpringBootTest; -import org.springframework.test.context.junit4.SpringRunner; - -import java.util.HashMap; -import java.util.Map; - -@RunWith(SpringRunner.class) -@SpringBootTest -public class DatabaseMapperTest { - @Autowired - private DatabaseMapper databaseMapper; - - @Test - public void createDatabase() { - databaseMapper.createDatabase("db_test"); - } - - @Test - public void dropDatabase() { - databaseMapper.dropDatabase("db_test"); - } - - @Test - public void creatDatabaseWithParameters() { - Map map = new HashMap<>(); - map.put("dbname", "weather"); - map.put("keep", "3650"); - map.put("days", "30"); - map.put("replica", "1"); - databaseMapper.createDatabaseWithParameters(map); - } - - @Test - public void useDatabase() { - databaseMapper.useDatabase("test"); - } -} \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/SubTableMapperTest.java b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/SubTableMapperTest.java deleted file mode 100644 index 90faa20496c5ecfe64c929910df4028099f974ad..0000000000000000000000000000000000000000 --- a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/SubTableMapperTest.java +++ /dev/null @@ -1,88 +0,0 @@ -package com.taosdata.taosdemo.mapper; - -import com.taosdata.taosdemo.domain.*; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.boot.test.context.SpringBootTest; -import org.springframework.test.context.junit4.SpringRunner; - -import java.util.ArrayList; -import java.util.List; - -@RunWith(SpringRunner.class) -@SpringBootTest -public class SubTableMapperTest { - @Autowired - private SubTableMapper subTableMapper; - private List tables; - - @Test - public void createUsingSuperTable() { - SubTableMeta subTableMeta = new SubTableMeta(); - subTableMeta.setDatabase("test"); - subTableMeta.setSupertable("weather"); - subTableMeta.setName("t1"); - List tags = new ArrayList<>(); - for (int i = 0; i < 3; i++) { - tags.add(new TagValue("tag" + (i + 1), "nchar(64)")); - } - 
subTableMeta.setTags(tags); - subTableMapper.createUsingSuperTable(subTableMeta); - } - - @Test - public void insertOneTableMultiValues() { - subTableMapper.insertOneTableMultiValues(tables.get(0)); - } - - @Test - public void insertOneTableMultiValuesUsingSuperTable() { - subTableMapper.insertOneTableMultiValuesUsingSuperTable(tables.get(0)); - } - - - @Test - public void insertMultiTableMultiValues() { - subTableMapper.insertMultiTableMultiValues(tables); - } - - @Test - public void insertMultiTableMultiValuesUsingSuperTable() { - subTableMapper.insertMultiTableMultiValuesUsingSuperTable(tables); - } - - - @Before - public void before() { - tables = new ArrayList<>(); - for (int ind = 0; ind < 3; ind++) { - - SubTableValue table = new SubTableValue(); - table.setDatabase("test"); - // supertable - table.setSupertable("weather"); - table.setName("t" + (ind + 1)); - // tags - List tags = new ArrayList<>(); - for (int i = 0; i < 3; i++) { - tags.add(new TagValue("tag" + (i + 1), "beijing")); - } - table.setTags(tags); - // values - List values = new ArrayList<>(); - for (int i = 0; i < 2; i++) { - List fields = new ArrayList<>(); - for (int j = 0; j < 4; j++) { - fields.add(new FieldValue("f" + (j + 1), (j + 1) * 10)); - } - values.add(new RowValue(fields)); - } - table.setValues(values); - - tables.add(table); - } - } - -} \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/SuperTableMapperTest.java b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/SuperTableMapperTest.java deleted file mode 100644 index 6c97874cfc00b128f4a3e09d9d0f698274baeff0..0000000000000000000000000000000000000000 --- a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/SuperTableMapperTest.java +++ /dev/null @@ -1,50 +0,0 @@ -package com.taosdata.taosdemo.mapper; - -import com.taosdata.taosdemo.domain.FieldMeta; -import com.taosdata.taosdemo.domain.SuperTableMeta; -import com.taosdata.taosdemo.domain.TagMeta; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.boot.test.context.SpringBootTest; -import org.springframework.test.context.junit4.SpringRunner; - -import java.util.ArrayList; -import java.util.List; - -@RunWith(SpringRunner.class) -@SpringBootTest -public class SuperTableMapperTest { - @Autowired - private SuperTableMapper superTableMapper; - - @Test - public void testCreateSuperTableUsingSQL() { - String sql = "create table test.weather (ts timestamp, temperature float, humidity int) tags(location nchar(64), groupId int)"; - superTableMapper.createSuperTableUsingSQL(sql); - } - - @Test - public void createSuperTable() { - SuperTableMeta superTableMeta = new SuperTableMeta(); - superTableMeta.setDatabase("test"); - superTableMeta.setName("weather"); - List fields = new ArrayList<>(); - for (int i = 0; i < 5; i++) { - fields.add(new FieldMeta("f" + (i + 1), "int")); - } - superTableMeta.setFields(fields); - List tags = new ArrayList<>(); - for (int i = 0; i < 3; i++) { - tags.add(new TagMeta("t" + (i + 1), "nchar(64)")); - } - superTableMeta.setTags(tags); - - superTableMapper.createSuperTable(superTableMeta); - } - - @Test - public void dropSuperTable() { - superTableMapper.dropSuperTable("test", "weather"); - } -} \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/TableMapperTest.java 
b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/TableMapperTest.java deleted file mode 100644 index 3a051b311268ba076f97edc1b36032eac0b52709..0000000000000000000000000000000000000000 --- a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/TableMapperTest.java +++ /dev/null @@ -1,142 +0,0 @@ -package com.taosdata.taosdemo.mapper; - -import com.taosdata.taosdemo.domain.*; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.boot.test.context.SpringBootTest; -import org.springframework.test.context.junit4.SpringRunner; - -import java.util.ArrayList; -import java.util.List; -import java.util.Random; - -@SpringBootTest -@RunWith(SpringRunner.class) -public class TableMapperTest { - @Autowired - private TableMapper tableMapper; - private static Random random = new Random(System.currentTimeMillis()); - - @Test - public void create() { - TableMeta table = new TableMeta(); - table.setDatabase("test"); - table.setName("t1"); - List fields = new ArrayList<>(); - for (int i = 0; i < 3; i++) { - FieldMeta field = new FieldMeta(); - field.setName("f" + (i + 1)); - field.setType("nchar(64)"); - fields.add(field); - } - table.setFields(fields); - tableMapper.create(table); - } - - @Test - public void insertOneTableMultiValues() { - TableValue table = new TableValue(); - table.setDatabase("test"); - table.setName("t1"); - List values = new ArrayList<>(); - for (int j = 0; j < 5; j++) { - List fields = new ArrayList<>(); - for (int k = 0; k < 2; k++) { - FieldValue field = new FieldValue<>(); - field.setValue((k + 1) * 100); - fields.add(field); - } - values.add(new RowValue(fields)); - } - table.setValues(values); - - tableMapper.insertOneTableMultiValues(table); - } - - @Test - public void insertOneTableMultiValuesWithCoulmns() { - TableValue tableValue = new TableValue(); - tableValue.setDatabase("test"); - tableValue.setName("weather"); - // columns - List columns = new ArrayList<>(); - for (int i = 0; i < 3; i++) { - FieldMeta field = new FieldMeta(); - field.setName("f" + (i + 1)); - columns.add(field); - } - tableValue.setColumns(columns); - // values - List values = new ArrayList<>(); - for (int i = 0; i < 3; i++) { - List fields = new ArrayList<>(); - for (int j = 0; j < 3; j++) { - FieldValue field = new FieldValue(); - field.setValue(j); - fields.add(field); - } - values.add(new RowValue(fields)); - } - tableValue.setValues(values); - tableMapper.insertOneTableMultiValuesWithColumns(tableValue); - } - - @Test - public void insertMultiTableMultiValues() { - List tables = new ArrayList<>(); - for (int i = 0; i < 3; i++) { - TableValue table = new TableValue(); - table.setDatabase("test"); - table.setName("t" + (i + 1)); - List values = new ArrayList<>(); - for (int j = 0; j < 5; j++) { - List fields = new ArrayList<>(); - for (int k = 0; k < 2; k++) { - FieldValue field = new FieldValue<>(); - field.setValue((k + 1) * 10); - fields.add(field); - } - values.add(new RowValue(fields)); - } - table.setValues(values); - - tables.add(table); - } - tableMapper.insertMultiTableMultiValues(tables); - } - - @Test - public void insertMultiTableMultiValuesWithCoulumns() { - List tables = new ArrayList<>(); - for (int i = 0; i < 3; i++) { - TableValue table = new TableValue(); - table.setDatabase("test"); - table.setName("t" + (i + 1)); - // columns - List columns = new ArrayList<>(); - for (int j = 0; j < 3; j++) { - FieldMeta field = new FieldMeta(); - 
field.setName("f" + (j + 1)); - columns.add(field); - } - table.setColumns(columns); - // values - List values = new ArrayList<>(); - for (int j = 0; j < 5; j++) { - List fields = new ArrayList<>(); - for (int k = 0; k < columns.size(); k++) { - FieldValue field = new FieldValue<>(); - field.setValue((k + 1) * 10); - fields.add(field); - } - values.add(new RowValue(fields)); - } - table.setValues(values); - - tables.add(table); - } - tableMapper.insertMultiTableMultiValuesWithColumns(tables); - } - -} \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/DatabaseServiceTest.java b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/DatabaseServiceTest.java index 2c1cdf6e00695aa6fa26395c17458e66626e62ea..621ba7df5da4bb29747a5a4af1f91d51f6a1d7d4 100644 --- a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/DatabaseServiceTest.java +++ b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/DatabaseServiceTest.java @@ -1,15 +1,9 @@ package com.taosdata.taosdemo.service; import org.junit.Test; -import org.junit.runner.RunWith; import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.boot.test.context.SpringBootTest; -import org.springframework.test.context.junit4.SpringRunner; -@RunWith(SpringRunner.class) -@SpringBootTest public class DatabaseServiceTest { - @Autowired private DatabaseService service; @Test diff --git a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/SubTableServiceTest.java b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/SubTableServiceTest.java index 4e54de3f1385561107aedbe48d89362961783759..f7e5cd45057472602ad6e7a43a8d8bdb31a02b40 100644 --- a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/SubTableServiceTest.java +++ b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/SubTableServiceTest.java @@ -4,18 +4,12 @@ import com.taosdata.taosdemo.domain.SubTableMeta; import com.taosdata.taosdemo.domain.TagValue; import org.junit.Before; import org.junit.Test; -import org.junit.runner.RunWith; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.boot.test.context.SpringBootTest; -import org.springframework.test.context.junit4.SpringRunner; import java.util.ArrayList; import java.util.List; -@RunWith(SpringRunner.class) -@SpringBootTest public class SubTableServiceTest { - @Autowired + private SubTableService service; private List subTables; @@ -38,13 +32,11 @@ public class SubTableServiceTest { @Test public void testCreateSubTable() { - int count = service.createSubTable(subTables); - System.out.println("count >>> " + count); + } @Test public void testCreateSubTableList() { - int count = service.createSubTable(subTables, 10); - System.out.println("count >>> " + count); + } } \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/SuperTableServiceTest.java b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/SuperTableServiceTest.java index b9291fceaf89d3fdeef4c638b4ace5c995b7e369..33e52af1eaa779e7ed402a63633bf0dbb9fbadd7 100644 --- a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/SuperTableServiceTest.java +++ b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/SuperTableServiceTest.java @@ -4,19 +4,12 @@ import com.taosdata.taosdemo.domain.FieldMeta; import 
com.taosdata.taosdemo.domain.SuperTableMeta; import com.taosdata.taosdemo.domain.TagMeta; import org.junit.Test; -import org.junit.runner.RunWith; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.boot.test.context.SpringBootTest; -import org.springframework.test.context.junit4.SpringRunner; import java.util.ArrayList; import java.util.List; -@RunWith(SpringRunner.class) -@SpringBootTest public class SuperTableServiceTest { - @Autowired private SuperTableService service; @Test diff --git a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/TableServiceTest.java b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/TableServiceTest.java index fdbd5546294ce43bc147ddad314d1b9498f1d5ad..1f52198d68823326dd81d8c419fc02d89e15ef2d 100644 --- a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/TableServiceTest.java +++ b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/TableServiceTest.java @@ -3,18 +3,11 @@ package com.taosdata.taosdemo.service; import com.taosdata.taosdemo.domain.TableMeta; import org.junit.Before; import org.junit.Test; -import org.junit.runner.RunWith; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.boot.test.context.SpringBootTest; -import org.springframework.test.context.junit4.SpringRunner; import java.util.ArrayList; import java.util.List; -@RunWith(SpringRunner.class) -@SpringBootTest public class TableServiceTest { - @Autowired private TableService tableService; private List tables; @@ -32,12 +25,7 @@ public class TableServiceTest { @Test public void testCreate() { - int count = tableService.create(tables); - System.out.println(count); + tableService.create(tables); } - @Test - public void testCreateMultiThreads() { - System.out.println(tableService.create(tables, 10)); - } } \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/utils/SqlSpellerTest.java b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/utils/SqlSpellerTest.java new file mode 100644 index 0000000000000000000000000000000000000000..daabd51ca75a6c9f4bfeead0b747c4de69f40a7a --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/utils/SqlSpellerTest.java @@ -0,0 +1,254 @@ +package com.taosdata.taosdemo.utils; + +import com.taosdata.taosdemo.domain.*; +import org.junit.Before; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; + +public class SqlSpellerTest { + + @Test + public void createDatabase() { + HashMap map = new HashMap<>(); + map.put("database", "jdbcdb"); + map.put("keep", "3650"); + map.put("days", "30"); + map.put("replica", "1"); + map.put("minRows", "100"); + map.put("maxRows", "1000"); + map.put("cache", "16"); + map.put("blocks", "8"); + map.put("precision", "ms"); + map.put("comp", "2"); + map.put("walLevel", "1"); + map.put("quorum", "1"); + map.put("fsync", "3000"); + map.put("update", "0"); + String sql = SqlSpeller.createDatabase(map); + System.out.println(sql); + } + + @Test + public void createTableUsingSuperTable() { + SubTableMeta subTableMeta = new SubTableMeta(); + subTableMeta.setDatabase("test"); + subTableMeta.setSupertable("weather"); + subTableMeta.setName("t1"); + List tags = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + tags.add(new TagValue("tag" + (i + 1), "nchar(64)")); + } + subTableMeta.setTags(tags); + String sql = 
SqlSpeller.createTableUsingSuperTable(subTableMeta); + System.out.println(sql); + } + + @Test + public void insertOneTableMultiValues() { + String sql = SqlSpeller.insertOneTableMultiValues(tables.get(0)); + System.out.println(sql); + } + + @Test + public void insertOneTableMultiValuesUsingSuperTable() { + String sql = SqlSpeller.insertOneTableMultiValuesUsingSuperTable(tables.get(0)); + System.out.println(sql); + } + + @Test + public void insertMultiTableMultiValues() { + String sql = SqlSpeller.insertMultiSubTableMultiValues(tables); + System.out.println(sql); + } + + @Test + public void insertMultiTableMultiValuesUsingSuperTable() { + String sql = SqlSpeller.insertMultiTableMultiValuesUsingSuperTable(tables); + System.out.println(sql); + } + + private List tables; + + @Before + public void before() { + tables = new ArrayList<>(); + for (int ind = 0; ind < 3; ind++) { + SubTableValue table = new SubTableValue(); + table.setDatabase("test"); + // supertable + table.setSupertable("weather"); + table.setName("t" + (ind + 1)); + // tags + List tags = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + tags.add(new TagValue("tag" + (i + 1), "beijing")); + } + table.setTags(tags); + // values + List values = new ArrayList<>(); + for (int i = 0; i < 2; i++) { + List fields = new ArrayList<>(); + for (int j = 0; j < 4; j++) { + fields.add(new FieldValue("f" + (j + 1), (j + 1) * 10)); + } + values.add(new RowValue(fields)); + } + table.setValues(values); + + tables.add(table); + } + } + + @Test + public void createSuperTable() { + SuperTableMeta superTableMeta = new SuperTableMeta(); + superTableMeta.setDatabase("test"); + superTableMeta.setName("weather"); + List fields = new ArrayList<>(); + for (int i = 0; i < 5; i++) { + fields.add(new FieldMeta("f" + (i + 1), "int")); + } + superTableMeta.setFields(fields); + List tags = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + tags.add(new TagMeta("t" + (i + 1), "nchar(64)")); + } + superTableMeta.setTags(tags); + + String sql = SqlSpeller.createSuperTable(superTableMeta); + System.out.println(sql); + } + + @Test + public void createTable() { + TableMeta table = new TableMeta(); + table.setDatabase("test"); + table.setName("t1"); + List fields = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + FieldMeta field = new FieldMeta(); + field.setName("f" + (i + 1)); + field.setType("nchar(64)"); + fields.add(field); + } + table.setFields(fields); + String sql = SqlSpeller.createTable(table); + System.out.println(sql); + } + + + @Test + public void testInsertOneTableMultiValues() { + TableValue table = new TableValue(); + table.setDatabase("test"); + table.setName("t1"); + List values = new ArrayList<>(); + for (int j = 0; j < 5; j++) { + List fields = new ArrayList<>(); + for (int k = 0; k < 2; k++) { + FieldValue field = new FieldValue<>(); + field.setValue((k + 1) * 100); + fields.add(field); + } + values.add(new RowValue(fields)); + } + table.setValues(values); + + String sql = SqlSpeller.insertOneTableMultiValues(table); + System.out.println(sql); + } + + @Test + public void insertOneTableMultiValuesWithColumns() { + TableValue tableValue = new TableValue(); + tableValue.setDatabase("test"); + tableValue.setName("weather"); + // columns + List columns = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + FieldMeta field = new FieldMeta(); + field.setName("f" + (i + 1)); + columns.add(field); + } + tableValue.setColumns(columns); + // values + List values = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + List fields = new ArrayList<>(); + 
for (int j = 0; j < 3; j++) { + FieldValue field = new FieldValue(); + field.setValue(j); + fields.add(field); + } + values.add(new RowValue(fields)); + } + tableValue.setValues(values); + + String sql = SqlSpeller.insertOneTableMultiValuesWithColumns(tableValue); + System.out.println(sql); + } + + @Test + public void insertMultiTableMultiValuesWithColumns() { + List tables = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + TableValue table = new TableValue(); + table.setDatabase("test"); + table.setName("t" + (i + 1)); + // columns + List columns = new ArrayList<>(); + for (int j = 0; j < 3; j++) { + FieldMeta field = new FieldMeta(); + field.setName("f" + (j + 1)); + columns.add(field); + } + table.setColumns(columns); + // values + List values = new ArrayList<>(); + for (int j = 0; j < 5; j++) { + List fields = new ArrayList<>(); + for (int k = 0; k < columns.size(); k++) { + FieldValue field = new FieldValue<>(); + field.setValue((k + 1) * 10); + fields.add(field); + } + values.add(new RowValue(fields)); + } + table.setValues(values); + tables.add(table); + } + + String sql = SqlSpeller.insertMultiTableMultiValuesWithColumns(tables); + System.out.println(sql); + } + + @Test + public void testInsertMultiTableMultiValues() { + List tables = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + TableValue table = new TableValue(); + table.setDatabase("test"); + table.setName("t" + (i + 1)); + List values = new ArrayList<>(); + for (int j = 0; j < 5; j++) { + List fields = new ArrayList<>(); + for (int k = 0; k < 2; k++) { + FieldValue field = new FieldValue<>(); + field.setValue((k + 1) * 10); + fields.add(field); + } + values.add(new RowValue(fields)); + } + table.setValues(values); + + tables.add(table); + } + + String sql = SqlSpeller.insertMultiTableMultiValues(tables); + System.out.println(sql); + } + +} \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/utils/TimeStampUtilTest.java b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/utils/TimeStampUtilTest.java index 628594c4b18008fd9e278bb6d23d0d5f87489b56..a4845677c55897c71791eba40104ea23de644f5c 100644 --- a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/utils/TimeStampUtilTest.java +++ b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/utils/TimeStampUtilTest.java @@ -17,6 +17,8 @@ public class TimeStampUtilTest { @Test public void longToDatetime() { + System.out.println(TimeStampUtil.longToDatetime(1293334499006l)); + String datetime = TimeStampUtil.longToDatetime(1510000000000L); assertEquals("2017-11-07 04:26:40.000", datetime); long timestamp = TimeStampUtil.datetimeToLong(datetime); diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index f0b3beacbe27590fe4e3ea74974f0852a15e593a..cb233f85b8132910b316fd130983769e99452bf9 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -221,11 +221,14 @@ python3 ./test.py -f functions/function_sum.py -r 1 python3 ./test.py -f functions/function_top.py -r 1 python3 ./test.py -f functions/function_twa.py -r 1 python3 ./test.py -f functions/function_twa_test2.py +python3 ./test.py -f functions/all_null_value.py python3 queryCount.py python3 ./test.py -f query/queryGroupbyWithInterval.py python3 client/twoClients.py -python3 test.py -f query/queryInterval.py -python3 test.py -f query/queryFillTest.py +python3 ./test.py -f query/queryInterval.py +python3 ./test.py -f query/queryFillTest.py +python3 ./test.py -f query/last_row_cache.py +python3 ./test.py 
-f query/last_cache.py # tools python3 test.py -f tools/taosdemoTest.py diff --git a/tests/pytest/functions/all_null_value.py b/tests/pytest/functions/all_null_value.py new file mode 100644 index 0000000000000000000000000000000000000000..5354b48f80dae25f7cd65fb00f90e4eeef2908cc --- /dev/null +++ b/tests/pytest/functions/all_null_value.py @@ -0,0 +1,90 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + self.rowNum = 10 + self.ts = 1537146000000 + + def run(self): + tdSql.prepare() + + tdSql.execute("create table st(ts timestamp, c1 int, c2 int)") + for i in range(self.rowNum): + tdSql.execute("insert into st values(%d, null, null)" % (self.ts + i)) + + tdSql.query("select avg(c1) from st") + tdSql.checkRows(0) + + tdSql.query("select max(c1) from st") + tdSql.checkRows(0) + + tdSql.query("select min(c1) from st") + tdSql.checkRows(0) + + tdSql.query("select bottom(c1, 1) from st") + tdSql.checkRows(0) + + tdSql.query("select top(c1, 1) from st") + tdSql.checkRows(0) + + tdSql.query("select diff(c1) from st") + tdSql.checkRows(0) + + tdSql.query("select first(c1) from st") + tdSql.checkRows(0) + + tdSql.query("select last(c1) from st") + tdSql.checkRows(0) + + tdSql.query("select last_row(c1) from st") + tdSql.checkRows(1) + tdSql.checkData(0, 0, None) + + tdSql.query("select count(c1) from st") + tdSql.checkRows(0) + + tdSql.query("select leastsquares(c1, 1, 1) from st") + tdSql.checkRows(0) + + tdSql.query("select c1 + c2 from st") + tdSql.checkRows(10) + + tdSql.query("select spread(c1) from st") + tdSql.checkRows(0) + + # tdSql.query("select stddev(c1) from st") + # tdSql.checkRows(0) + + tdSql.query("select sum(c1) from st") + tdSql.checkRows(0) + + tdSql.query("select twa(c1) from st") + tdSql.checkRows(0) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/functions/function_first.py b/tests/pytest/functions/function_first.py index 2b78bd33d8bdb0bcd6775215b1cf0b1e398f2928..5b2aacb779a64377baa0417d5ba0ba5ef0a27290 100644 --- a/tests/pytest/functions/function_first.py +++ b/tests/pytest/functions/function_first.py @@ -110,6 +110,10 @@ class TDTestCase: tdSql.query("select first(col9) from test1") tdSql.checkRows(1) tdSql.checkData(0, 0, '涛思数据1') + + # TD-2607 first,last + where none exist condition + interval + tdSql.query("select first(*),last(*) from test1 where ts < 23 interval(1s)") + tdSql.checkRows(0) def stop(self): tdSql.close() diff --git a/tests/pytest/functions/function_max.py b/tests/pytest/functions/function_max.py index 3bd5031276adfd76574a1a43e311a1d6ec81ab16..d1f8f75892ce2e61388c107eb7cad1ed00582d2f 100644 --- a/tests/pytest/functions/function_max.py +++ b/tests/pytest/functions/function_max.py @@ -69,6 +69,15 @@ class TDTestCase: 
tdSql.query("select max(col6) from test1") tdSql.checkData(0, 0, np.max(floatData)) + + # test case: https://jira.taosdata.com:18080/browse/TD-2583 + tdSql.execute("create database test days 2") + tdSql.execute("create table car(ts timestamp, speed int)") + tdSql.execute("insert into car values(now, -1)") + tdSql.execute("insert into car values(now-10d, null)") + + tdSql.query("select max(speed) from car") + tdSql.checkData(0, 0, -1) def stop(self): tdSql.close() diff --git a/tests/pytest/functions/function_min.py b/tests/pytest/functions/function_min.py index bc180bc22414a4a3aa3d233438b10a04fd08bc2b..c779744ced63d83fd5b2116fb61dfa347ec4dd5a 100644 --- a/tests/pytest/functions/function_min.py +++ b/tests/pytest/functions/function_min.py @@ -69,6 +69,15 @@ class TDTestCase: tdSql.query("select min(col6) from test1") tdSql.checkData(0, 0, np.min(floatData)) + + # test case: https://jira.taosdata.com:18080/browse/TD-2583 + tdSql.execute("create database test days 2") + tdSql.execute("create table car(ts timestamp, speed int)") + tdSql.execute("insert into car values(now, 1)") + tdSql.execute("insert into car values(now-10d, null)") + + tdSql.query("select min(speed) from car") + tdSql.checkData(0, 0, 1) def stop(self): tdSql.close() diff --git a/tests/pytest/functions/function_percentile.py b/tests/pytest/functions/function_percentile.py index e63d65f2e6a429015e2b4d7dcbe5e8c9884eea5e..e793008ce681a04111fdee29e91520fed944d05a 100644 --- a/tests/pytest/functions/function_percentile.py +++ b/tests/pytest/functions/function_percentile.py @@ -142,6 +142,14 @@ class TDTestCase: tdSql.error("select percentile(voltage, 20) from meters") tdSql.query("select apercentile(voltage, 20) from meters") print("apercentile result: %s" % tdSql.getData(0, 0)) + + # Test case for: https://jira.taosdata.com:18080/browse/TD-2609 + tdSql.execute("create table st(ts timestamp, k int)") + tdSql.execute("insert into st values(now, -100)") + tdSql.query("select apercentile(k, 20) from st") + tdSql.checkData(0, 0, -100.00) + + def stop(self): tdSql.close() diff --git a/tests/pytest/functions/function_twa_test2.py b/tests/pytest/functions/function_twa_test2.py index 2a09ae3fc3a5e45ca0e75c63757c33fa5c9eb4cb..b20f14357e22d38162b5d69cc3ae643f700f8754 100644 --- a/tests/pytest/functions/function_twa_test2.py +++ b/tests/pytest/functions/function_twa_test2.py @@ -132,6 +132,22 @@ class TDTestCase: tdSql.query('select twa(c) from t4 interval(10s)') tdSql.checkData(0,1,10.999) + # Test case: https://jira.taosdata.com:18080/browse/TD-2624 + tdSql.execute("create database test keep 7300") + tdSql.execute("use test") + tdSql.execute("create table st(ts timestamp, k int)") + tdSql.execute("insert into st values('2011-01-02 18:42:45.326', -1)") + tdSql.execute("insert into st values('2020-07-30 17:44:06.283', 0)") + tdSql.execute("insert into st values('2020-07-30 17:44:19.578', 9999999)") + tdSql.execute("insert into st values('2020-07-30 17:46:06.417', NULL)") + tdSql.execute("insert into st values('2020-11-09 18:42:25.538', 0)") + tdSql.execute("insert into st values('2020-12-29 17:43:11.641', 0)") + tdSql.execute("insert into st values('2020-12-29 18:43:17.129', 0)") + tdSql.execute("insert into st values('2020-12-29 18:46:19.109', NULL)") + tdSql.execute("insert into st values('2021-01-03 18:40:40.065', 0)") + + tdSql.query("select twa(k),first(ts) as taos1 from st where k <50 interval(17s)") + tdSql.checkRows(6) def stop(self): tdSql.close() diff --git a/tests/pytest/handle_crash_gen_val_log.sh 
b/tests/pytest/handle_crash_gen_val_log.sh index 0f89ef2ec4b01f402a2d10bad829269c77b7c2ec..528316700d184171641f7f686a3c12102b6c1feb 100755 --- a/tests/pytest/handle_crash_gen_val_log.sh +++ b/tests/pytest/handle_crash_gen_val_log.sh @@ -16,7 +16,7 @@ TOP_DIR=`pwd` TAOSD_DIR=`find . -name "taosd"|grep -v community|head -n1` nohup $TAOSD_DIR >/dev/null & cd - -./crash_gen.sh --valgrind -p -t 10 -s 250 -b 4 +./crash_gen.sh --valgrind -p -t 10 -s 350 -b 4 pidof taosd|xargs kill -9 grep 'start to execute\|ERROR SUMMARY' valgrind.err|grep -v 'grep'|uniq|tee crash_gen_mem_err.log @@ -36,10 +36,14 @@ for defiMemError in `grep 'definitely lost:' crash_gen-definitely-lost-out.log | do defiMemError=(${defiMemError//,/}) if [ -n "$defiMemError" ]; then - if [ "$defiMemError" -gt 3 ]; then + if [ "$defiMemError" -gt 3 -a "$defiMemError" -lt 1013 ]; then echo -e "${RED} ## Memory errors number valgrind reports \ Definitely lost is $defiMemError. More than our threshold! ## ${NC}" - exit 8 + exit 8 + elif [ "$defiMemError" -gt 1013 ];then #add for azure + echo -e "${RED} ## Memory errors number valgrind reports \ + Definitely lost is $defiMemError. More than our threshold! ## ${NC}" + exit 8 fi fi done diff --git a/tests/pytest/pytest_2.sh b/tests/pytest/pytest_2.sh index 28c0a38351a14268594497431a19ec7f83bc05cb..4ec517a0bf1c5eff8ad670cf28ab63d5ce818460 100755 --- a/tests/pytest/pytest_2.sh +++ b/tests/pytest/pytest_2.sh @@ -15,4 +15,7 @@ python3 ./test.py -f update/merge_commit_last.py python3 ./test.py -f update/bug_td2279.py # wal -python3 ./test.py -f wal/addOldWalTest.py \ No newline at end of file +python3 ./test.py -f wal/addOldWalTest.py + +# function +python3 ./test.py -f functions/all_null_value.py \ No newline at end of file diff --git a/tests/pytest/query/last_cache.py b/tests/pytest/query/last_cache.py new file mode 100644 index 0000000000000000000000000000000000000000..c31d9821e2695dd7883674519325c24f0e473bf2 --- /dev/null +++ b/tests/pytest/query/last_cache.py @@ -0,0 +1,133 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql +from util.dnodes import tdDnodes + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.tables = 10 + self.rows = 20 + self.perfix = 't' + self.ts = 1601481600000 + + def insertData(self): + print("==============step1") + tdSql.execute("create table st (ts timestamp, c1 int) tags(t1 int)") + + for i in range(self.tables): + tdSql.execute("create table %s%d using st tags(%d)" % (self.perfix, i, i)) + for j in range(self.rows): + tc = self.ts + j * 60000 + tdSql.execute("insert into %s%d values(%d, %d)" %(self.perfix, i, tc, j)) + + def executeQueries(self): + print("==============step2") + tdSql.query("select last(c1) from %s%d" % (self.perfix, 1)) + tdSql.checkData(0, 0, 19) + + tdSql.query("select last(c1) from %s%d where ts <= %d" % (self.perfix, 1, self.ts + 4 * 60000)) + tdSql.checkData(0, 0, 4) + + tdSql.query("select last(c1) as b from %s%d" % (self.perfix, 1)) + tdSql.checkData(0, 0, 19) + + tdSql.query("select last(c1) from %s%d interval(1m)" % (self.perfix, 1)) + tdSql.checkData(1, 1, 1) + + tdSql.query("select last(c1) from %s%d interval(1d)" % (self.perfix, 1)) + tdSql.checkData(0, 1, 19) + + tdSql.query("select last(c1) from %s%d where ts <= %d interval(1m)" % (self.perfix, 1, self.ts + 4 * 60000)) + tdSql.checkRows(5) + tdSql.checkData(1, 1, 1) + + tdSql.query("select last(c1) from st") + tdSql.checkData(0, 0, 19) + + tdSql.query("select last(c1) as c from st where ts <= %d" % (self.ts + 4 * 60000)) + tdSql.checkData(0, 0, 4) + + tdSql.query("select last(c1) as c from st where t1 <= 5") + tdSql.checkData(0, 0, 19) + + tdSql.query("select last(c1) as c from st where t1 <= 5 and ts <= %d" % (self.ts + 4 * 60000)) + tdSql.checkData(0, 0, 4) + + tdSql.query("select last(c1) from st interval(1m)") + tdSql.checkData(1, 1, 1) + + tdSql.query("select last(c1) from st interval(1d)") + tdSql.checkData(0, 1, 19) + + tdSql.query("select last(c1) from st group by t1") + tdSql.checkRows(10) + tdSql.checkData(0, 0, 19) + + tdSql.query("select last(c1) as c from st where ts <= %d interval(1m) group by t1" % (self.ts + 4 * 60000)) + tdSql.checkData(1, 1, 1) + tdSql.checkRows(50) + + def run(self): + tdSql.prepare() + + # last_cache_0.sim + tdSql.execute("create database test1 cachelast 0") + tdSql.execute("use test1") + self.insertData() + self.executeQueries() + + tdSql.execute("alter database test1 cachelast 1") + self.executeQueries() + tdDnodes.stop(1) + tdDnodes.start(1) + self.executeQueries() + + tdSql.execute("alter database test1 cachelast 0") + self.executeQueries() + tdDnodes.stop(1) + tdDnodes.start(1) + self.executeQueries() + + # last_cache_1.sim + tdSql.execute("create database test2 cachelast 1") + tdSql.execute("use test2") + self.insertData() + self.executeQueries() + + tdSql.execute("alter database test2 cachelast 0") + self.executeQueries() + tdDnodes.stop(1) + tdDnodes.start(1) + self.executeQueries() + + tdSql.execute("alter database test2 cachelast 1") + self.executeQueries() + tdDnodes.stop(1) + tdDnodes.start(1) + self.executeQueries() + + def stop(self): + tdSql.close() + 
tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/query/last_row_cache.py b/tests/pytest/query/last_row_cache.py new file mode 100644 index 0000000000000000000000000000000000000000..d9e09dae7acdcd8c7401f1370da9f544c850872d --- /dev/null +++ b/tests/pytest/query/last_row_cache.py @@ -0,0 +1,186 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql +from util.dnodes import tdDnodes + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.tables = 10 + self.rows = 20 + self.perfix = 't' + self.ts = 1601481600000 + + def insertData(self): + print("==============step1") + tdSql.execute("create table st (ts timestamp, c1 int) tags(t1 int)") + + for i in range(self.tables): + tdSql.execute("create table %s%d using st tags(%d)" % (self.perfix, i, i)) + for j in range(self.rows): + tc = self.ts + j * 60000 + tdSql.execute("insert into %s%d values(%d, %d)" %(self.perfix, i, tc, j)) + + def executeQueries(self): + print("==============step2") + tdSql.query("select last_row(c1) from %s%d" % (self.perfix, 1)) + tdSql.checkData(0, 0, 19) + + tdSql.query("select last_row(c1) from %s%d where ts <= %d" % (self.perfix, 1, self.ts + 4 * 60000)) + tdSql.checkData(0, 0, 4) + + tdSql.query("select last_row(c1) as b from %s%d" % (self.perfix, 1)) + tdSql.checkData(0, 0, 19) + + tdSql.query("select last_row(c1) from st") + tdSql.checkData(0, 0, 19) + + tdSql.query("select last_row(c1) as c from st where ts <= %d" % (self.ts + 4 * 60000)) + tdSql.checkData(0, 0, 4) + + tdSql.query("select last_row(c1) as c from st where t1 < 5") + tdSql.checkData(0, 0, 19) + + tdSql.query("select last_row(c1) as c from st where t1 <= 5 and ts <= %d" % (self.ts + 4 * 60000)) + tdSql.checkData(0, 0, 4) + + tdSql.query("select last_row(c1) as c from st group by t1") + tdSql.checkRows(10) + tdSql.checkData(0, 0, 19) + + tc = self.ts + 1 * 3600000 + tdSql.execute("insert into %s%d values(%d, %d)" %(self.perfix, 1, tc, 10)) + + tc = self.ts + 3 * 3600000 + tdSql.execute("insert into %s%d values(%d, null)" %(self.perfix, 1, tc)) + + tc = self.ts + 5 * 3600000 + tdSql.execute("insert into %s%d values(%d, %d)" %(self.perfix, 1, tc, -1)) + + tc = self.ts + 7 * 3600000 + tdSql.execute("insert into %s%d values(%d, null)" %(self.perfix, 1, tc)) + + def insertData2(self): + tc = self.ts + 1 * 3600000 + tdSql.execute("insert into %s%d values(%d, %d)" %(self.perfix, 1, tc, 10)) + + tc = self.ts + 3 * 3600000 + tdSql.execute("insert into %s%d values(%d, null)" %(self.perfix, 1, tc)) + + tc = self.ts + 5 * 3600000 + tdSql.execute("insert into %s%d values(%d, %d)" %(self.perfix, 1, tc, -1)) + + tc = self.ts + 7 * 3600000 + tdSql.execute("insert into %s%d values(%d, null)" %(self.perfix, 1, tc)) + + def executeQueries2(self): + # For stable + tc = self.ts + 6 * 3600000 + tdSql.query("select 
last_row(c1) from st where ts < %d " % tc) + tdSql.checkData(0, 0, -1) + + tc = self.ts + 8 * 3600000 + tdSql.query("select last_row(*) from st where ts < %d " % tc) + tdSql.checkData(0, 1, None) + + tdSql.query("select last_row(*) from st") + tdSql.checkData(0, 1, None) + + tc = self.ts + 4 * 3600000 + tdSql.query("select last_row(*) from st where ts < %d " % tc) + tdSql.checkData(0, 1, None) + + tc1 = self.ts + 1 * 3600000 + tc2 = self.ts + 4 * 3600000 + tdSql.query("select last_row(*) from st where ts > %d and ts <= %d" % (tc1, tc2)) + tdSql.checkData(0, 1, None) + + # For table + tc = self.ts + 6 * 3600000 + tdSql.query("select last_row(*) from %s%d where ts <= %d" % (self.perfix, 1, tc)) + tdSql.checkData(0, 1, -1) + + tc = self.ts + 8 * 3600000 + tdSql.query("select last_row(*) from %s%d where ts <= %d" % (self.perfix, 1, tc)) + tdSql.checkData(0, 1, None) + + tdSql.query("select last_row(*) from %s%d" % (self.perfix, 1)) + tdSql.checkData(0, 1, None) + + tc = self.ts + 4 * 3600000 + tdSql.query("select last_row(*) from %s%d where ts <= %d" % (self.perfix, 1, tc)) + tdSql.checkData(0, 1, None) + + tc1 = self.ts + 1 * 3600000 + tc2 = self.ts + 4 * 3600000 + tdSql.query("select last_row(*) from st where ts > %d and ts <= %d" % (tc1, tc2)) + tdSql.checkData(0, 1, None) + + def run(self): + tdSql.prepare() + + print("============== last_row_cache_0.sim") + tdSql.execute("create database test1 cachelast 0") + tdSql.execute("use test1") + self.insertData() + self.executeQueries() + self.insertData2() + self.executeQueries2() + + print("============== alter last cache") + tdSql.execute("alter database test1 cachelast 1") + self.executeQueries2() + tdDnodes.stop(1) + tdDnodes.start(1) + self.executeQueries2() + + tdSql.execute("alter database test1 cachelast 0") + self.executeQueries2() + tdDnodes.stop(1) + tdDnodes.start(1) + self.executeQueries2() + + print("============== last_row_cache_1.sim") + tdSql.execute("create database test2 cachelast 1") + tdSql.execute("use test2") + self.insertData() + self.executeQueries() + self.insertData2() + self.executeQueries2() + + tdSql.execute("alter database test2 cachelast 0") + self.executeQueries2() + tdDnodes.stop(1) + tdDnodes.start(1) + self.executeQueries2() + + tdSql.execute("alter database test2 cachelast 1") + self.executeQueries2() + tdDnodes.stop(1) + tdDnodes.start(1) + self.executeQueries2() + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/query/queryInterval.py b/tests/pytest/query/queryInterval.py index 871c076c0813f0ccd340d33bff992401e522df4a..9cc468b34e6133af393d9d0d800e64d14e85c430 100644 --- a/tests/pytest/query/queryInterval.py +++ b/tests/pytest/query/queryInterval.py @@ -24,7 +24,7 @@ class TDTestCase: tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) - self.ts = 1593548685000 + self.ts = 1593548685000 def run(self): tdSql.prepare() @@ -84,6 +84,22 @@ class TDTestCase: tdDnodes.start(1) tdSql.query("select last(*) from t interval(1s)") tdSql.checkRows(10000) + + # test case for https://jira.taosdata.com:18080/browse/TD-2601 + newTs = 1601481600000 + + tdSql.execute("create database test2") + tdSql.execute("use test2") + tdSql.execute("create table t (ts timestamp, voltage int)") + for i in range(100): + tdSql.execute("insert into t values(%d, %d)" % (newTs + i * 10000000, i)) + + tdSql.query("select sum(voltage) from t where ts >='2020-10-01 
00:00:00' and ts <='2020-12-01 00:00:00' interval(1n) fill(NULL)") + tdSql.checkRows(3) + tdSql.checkData(0, 1, 4950) + tdSql.checkData(1, 1, None) + tdSql.checkData(2, 1, None) + def stop(self): diff --git a/tests/pytest/stream/sys.py b/tests/pytest/stream/sys.py index ecbcb651b14b494f247b014ff0cf81e5ae7db8f4..a73e7043e8c65b2eb9c78fbcb99d4e546ddf9ae4 100644 --- a/tests/pytest/stream/sys.py +++ b/tests/pytest/stream/sys.py @@ -28,6 +28,7 @@ class TDTestCase: def run(self): + time.sleep(5) tdSql.execute("use log") tdSql.execute("create table cpustrm as select count(*), avg(cpu_taosd), max(cpu_taosd), min(cpu_taosd), avg(cpu_system), max(cpu_cores), min(cpu_cores), last(cpu_cores) from log.dn1 interval(4s)") diff --git a/tests/pytest/test.py b/tests/pytest/test.py index 6be86fe3fdc4f0cd781d7749b9011a72ad687571..c7781f20873066b02416049e8c472e07c0c3e328 100644 --- a/tests/pytest/test.py +++ b/tests/pytest/test.py @@ -118,8 +118,11 @@ if __name__ == "__main__": tdDnodes.stopAll() is_test_framework = 0 key_word = 'tdCases.addLinux' - if key_word in open(fileName).read(): - is_test_framework = 1 + try: + if key_word in open(fileName).read(): + is_test_framework = 1 + except: + pass if is_test_framework: moduleName = fileName.replace(".py", "").replace("/", ".") uModule = importlib.import_module(moduleName) diff --git a/tests/pytest/tools/lowaTest.py b/tests/pytest/tools/lowaTest.py index 523229dd463d54c5b2cd23a9a3d4d547858a3b5c..2b65dcf3eff1ed9ed7275fd774807cfa0318ec81 100644 --- a/tests/pytest/tools/lowaTest.py +++ b/tests/pytest/tools/lowaTest.py @@ -51,7 +51,7 @@ class TDTestCase: else: tdLog.info("taosd found in %s" % buildPath) binPath = buildPath+ "/build/bin/" - os.system("yes | %slowa -f tools/insert.json" % binPath) + os.system("yes | %staosdemox -f tools/insert.json" % binPath) tdSql.execute("use db01") tdSql.query("select count(*) from stb01") diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index 83cb4e8d9994b1729ee13dda6829de300d9e8d7a..afea23372f5bd13949bb6967392167df59c3d2b2 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -291,8 +291,8 @@ class TDDnode: break tdLog.debug("the dnode:%d has been started." % (self.index)) else: - tdLog.debug("wait 5 seconds for the dnode:%d to start." % (self.index)) - time.sleep(5) + tdLog.debug("wait 10 seconds for the dnode:%d to start." 
% (self.index)) + time.sleep(10) # time.sleep(5) diff --git a/tests/script/general/connection/sim.tar.gz b/tests/script/general/connection/sim.tar.gz new file mode 100644 index 0000000000000000000000000000000000000000..10bc1a6bace1c8b6796a98c53e3aa3c15e0bfd7c Binary files /dev/null and b/tests/script/general/connection/sim.tar.gz differ diff --git a/tests/script/general/connection/test_old_data.sim b/tests/script/general/connection/test_old_data.sim new file mode 100644 index 0000000000000000000000000000000000000000..83df850f0bb93fd216520bbea4065400aa334091 --- /dev/null +++ b/tests/script/general/connection/test_old_data.sim @@ -0,0 +1,32 @@ +system sh/stop_dnodes.sh +system sh/mv_old_data.sh + +print ============== deploy + +system sh/exec.sh -n dnode1 -s start +system sh/exec.sh -n dnode2 -s start +system sh/exec.sh -n dnode3 -s start + +print =============== step1 + +sql use test +sql select * from m1 + +print $rows points data are retrieved +if $rows != 7 then + return -1 +endi + +print =============== step 2 + +sql select * from t1 + +print $rows points data are retrieved +if $rows != 7 then + return -1 +endi + + +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode2 -s stop -x SIGINT +system sh/exec.sh -n dnode3 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index efe437fc1d319796dbf6179d570a38107618bc69..733b01f89500c63201d01ff5d13bc66c92efe109 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -385,3 +385,8 @@ cd ../../../debug; make ./test.sh -f unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir.sim ./test.sh -f unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir_stopAll_starAll.sim ./test.sh -f unique/migrate/mn2_vn2_repl2_rmVnodeDir.sim + +./test.sh -f general/connection/test_old_data.sim +./test.sh -f unique/dnode/datatrans_3node.sim +./test.sh -f unique/dnode/datatrans_3node_2.sim + diff --git a/tests/script/jenkins/basic_1.txt b/tests/script/jenkins/basic_1.txt index 9820a00f40b60263537f98cafb098b34b57032ff..c124b60f806ff2e082289fa7e98b896a77e9d3b9 100644 --- a/tests/script/jenkins/basic_1.txt +++ b/tests/script/jenkins/basic_1.txt @@ -1,26 +1,3 @@ -./test.sh -f general/alter/cached_schema_after_alter.sim -./test.sh -f general/alter/count.sim -./test.sh -f general/alter/dnode.sim -./test.sh -f general/alter/import.sim -./test.sh -f general/alter/insert1.sim -./test.sh -f general/alter/insert2.sim -./test.sh -f general/alter/metrics.sim -./test.sh -f general/alter/table.sim - -./test.sh -f general/cache/new_metrics.sim -./test.sh -f general/cache/restart_metrics.sim -./test.sh -f general/cache/restart_table.sim - -./test.sh -f general/connection/connection.sim - -./test.sh -f general/column/commit.sim -./test.sh -f general/column/metrics.sim -./test.sh -f general/column/table.sim - -./test.sh -f general/compress/commitlog.sim -./test.sh -f general/compress/compress.sim -./test.sh -f general/compress/compress2.sim -./test.sh -f general/compress/uncompress.sim ./test.sh -f general/compute/avg.sim ./test.sh -f general/compute/bottom.sim @@ -153,14 +130,6 @@ ./test.sh -f general/db/nosuchfile.sim ./test.sh -f general/parser/function.sim -./test.sh -f general/stable/disk.sim -./test.sh -f general/stable/dnode3.sim -./test.sh -f general/stable/metrics.sim -./test.sh -f general/stable/refcount.sim -./test.sh -f general/stable/show.sim -./test.sh -f general/stable/values.sim -./test.sh -f general/stable/vnode3.sim - ./test.sh -f general/table/autocreate.sim ./test.sh 
-f general/table/basic1.sim ./test.sh -f general/table/basic2.sim @@ -188,20 +157,4 @@ ./test.sh -f general/table/table.sim ./test.sh -f general/table/tinyint.sim ./test.sh -f general/table/vgroup.sim -./test.sh -f unique/dnode/alternativeRole.sim -./test.sh -f unique/dnode/balance1.sim -./test.sh -f unique/dnode/balance2.sim -./test.sh -f unique/dnode/balance3.sim -./test.sh -f unique/dnode/balancex.sim -./test.sh -f unique/dnode/offline1.sim -./test.sh -f unique/dnode/offline2.sim -./test.sh -f unique/dnode/reason.sim -./test.sh -f unique/dnode/remove1.sim -./test.sh -f unique/dnode/remove2.sim -./test.sh -f unique/dnode/vnode_clean.sim - -./test.sh -f unique/http/admin.sim -./test.sh -f unique/http/opentsdb.sim -./test.sh -f unique/import/replica2.sim -./test.sh -f unique/import/replica3.sim diff --git a/tests/script/jenkins/basic_3.txt b/tests/script/jenkins/basic_3.txt index b44a2c6d44611da0360fe0a2c4266fd92f99aee2..25bfde28f0b724335e248933f01e72c3c35fd3dc 100644 --- a/tests/script/jenkins/basic_3.txt +++ b/tests/script/jenkins/basic_3.txt @@ -73,3 +73,7 @@ ./test.sh -f general/stream/stream_restart.sim ./test.sh -f general/stream/table_del.sim ./test.sh -f general/stream/table_replica1_vnoden.sim + +./test.sh -f general/connection/test_old_data.sim +./test.sh -f unique/dnode/datatrans_3node.sim +./test.sh -f unique/dnode/datatrans_3node_2.sim \ No newline at end of file diff --git a/tests/script/jenkins/basic_4.txt b/tests/script/jenkins/basic_4.txt new file mode 100644 index 0000000000000000000000000000000000000000..895281f218717af7dac3b8e1f890ce3e03b81609 --- /dev/null +++ b/tests/script/jenkins/basic_4.txt @@ -0,0 +1,49 @@ +./test.sh -f unique/dnode/alternativeRole.sim +./test.sh -f unique/dnode/balance1.sim +./test.sh -f unique/dnode/balance2.sim +./test.sh -f unique/dnode/balance3.sim +./test.sh -f unique/dnode/balancex.sim +./test.sh -f unique/dnode/offline1.sim +./test.sh -f unique/dnode/offline2.sim +./test.sh -f unique/dnode/reason.sim +./test.sh -f unique/dnode/remove1.sim +./test.sh -f unique/dnode/remove2.sim +./test.sh -f unique/dnode/vnode_clean.sim + +./test.sh -f unique/http/admin.sim +./test.sh -f unique/http/opentsdb.sim + +./test.sh -f unique/import/replica2.sim +./test.sh -f unique/import/replica3.sim + +./test.sh -f general/alter/cached_schema_after_alter.sim +./test.sh -f general/alter/count.sim +./test.sh -f general/alter/dnode.sim +./test.sh -f general/alter/import.sim +./test.sh -f general/alter/insert1.sim +./test.sh -f general/alter/insert2.sim +./test.sh -f general/alter/metrics.sim +./test.sh -f general/alter/table.sim + +./test.sh -f general/cache/new_metrics.sim +./test.sh -f general/cache/restart_metrics.sim +./test.sh -f general/cache/restart_table.sim + +./test.sh -f general/connection/connection.sim + +./test.sh -f general/column/commit.sim +./test.sh -f general/column/metrics.sim +./test.sh -f general/column/table.sim + +./test.sh -f general/compress/commitlog.sim +./test.sh -f general/compress/compress.sim +./test.sh -f general/compress/compress2.sim +./test.sh -f general/compress/uncompress.sim + +./test.sh -f general/stable/disk.sim +./test.sh -f general/stable/dnode3.sim +./test.sh -f general/stable/metrics.sim +./test.sh -f general/stable/refcount.sim +./test.sh -f general/stable/show.sim +./test.sh -f general/stable/values.sim +./test.sh -f general/stable/vnode3.sim \ No newline at end of file diff --git a/tests/script/sh/move_dnode.sh b/tests/script/sh/move_dnode.sh new file mode 100755 index 
0000000000000000000000000000000000000000..d6dc4bc3eb24fe094067cffcb51a7c335a512a94 --- /dev/null +++ b/tests/script/sh/move_dnode.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +echo "Executing move_dnode.sh" + +SCRIPT_DIR=`dirname $0` +cd $SCRIPT_DIR/../ +SCRIPT_DIR=`pwd` +echo "SCRIPT_DIR: $SCRIPT_DIR" + +IN_TDINTERNAL="community" +if [[ "$SCRIPT_DIR" == *"$IN_TDINTERNAL"* ]]; then + cd ../../.. +else + cd ../../ +fi + +TAOS_DIR=`pwd` +TAOSD_DIR=`find . -name "taosd"|grep bin|head -n1` + +if [[ "$TAOSD_DIR" == *"$IN_TDINTERNAL"* ]]; then + BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' --fields=2,3` +else + BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' --fields=2` +fi + +BUILD_DIR=$TAOS_DIR/$BIN_DIR/build + +SIM_DIR=$TAOS_DIR/sim + +NODE_DIR=$SIM_DIR/$NODE_NAME + +if [ -d "$SIM_DIR/$2" ];then + rm -rf $SIM_DIR/$2 +fi +mv $SIM_DIR/$1 $SIM_DIR/$2 + +if [[ $2 =~ "dnode2" ]];then + sed -i 's/serverPort 7100/serverPort 7200/g' $SIM_DIR/$2/cfg/taos.cfg + sed -i 's/dnode1/dnode2/g' $SIM_DIR/$2/cfg/taos.cfg + sed -i 's/7100/7200/g' $SIM_DIR/$2/data/dnode/dnodeEps.json +elif [[ $2 =~ "dnode4" ]];then + sed -i 's/serverPort 7100/serverPort 7400/g' $SIM_DIR/$2/cfg/taos.cfg + sed -i 's/dnode1/dnode4/g' $SIM_DIR/$2/cfg/taos.cfg + sed -i 's/7100/7400/g' $SIM_DIR/dnode2/data/dnode/dnodeEps.json + sed -i 's/7100/7400/g' $SIM_DIR/dnode3/data/dnode/dnodeEps.json + sed -i 's/7100/7400/g' $SIM_DIR/$2/data/dnode/dnodeEps.json +fi diff --git a/tests/script/sh/mv_old_data.sh b/tests/script/sh/mv_old_data.sh new file mode 100755 index 0000000000000000000000000000000000000000..112e9760d637d9698b87729c06ba7b2bde9651f8 --- /dev/null +++ b/tests/script/sh/mv_old_data.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +echo "Executing mv_old_data.sh" + +SCRIPT_DIR=`dirname $0` +cd $SCRIPT_DIR/../ +SCRIPT_DIR=`pwd` +echo "SCRIPT_DIR: $SCRIPT_DIR" + +IN_TDINTERNAL="community" +if [[ "$SCRIPT_DIR" == *"$IN_TDINTERNAL"* ]]; then + cd ../../.. +else + cd ../../ +fi + +TAOS_DIR=`pwd` +TAOSD_DIR=`find . -name "taosd"|grep bin|head -n1` + +if [[ "$TAOSD_DIR" == *"$IN_TDINTERNAL"* ]]; then + BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' --fields=2,3` +else + BIN_DIR=`find . 
-name "taosd"|grep bin|head -n1|cut -d '/' --fields=2` +fi + +BUILD_DIR=$TAOS_DIR/$BIN_DIR/build + +SIM_DIR=$TAOS_DIR/sim + +NODE_DIR=$SIM_DIR/$NODE_NAME + +rm -rf $SIM_DIR/dnode1 +rm -rf $SIM_DIR/dnode2 +rm -rf $SIM_DIR/dnode3 +rm -rf $SIM_DIR/tsim + +tar zxf $SCRIPT_DIR/general/connection/sim.tar.gz -C $SIM_DIR/../ diff --git a/tests/script/unique/db/replica_reduce31.sim b/tests/script/unique/db/replica_reduce31.sim index 00a0bbfcb3e74628dc71e357bd66046532844c41..5350bcc78c327ae9eb35f24e6d01901cebfb7a07 100644 --- a/tests/script/unique/db/replica_reduce31.sim +++ b/tests/script/unique/db/replica_reduce31.sim @@ -99,9 +99,11 @@ print ========= step2 alter db sql_error alter database d1 replica 1 sql_error alter database d2 replica 1 sql_error alter database d3 replica 1 +sql_error alter database d4 replica 1 sql alter database d1 replica 2 sql alter database d2 replica 2 sql alter database d3 replica 2 +sql alter database d4 replica 2 $x = 0 a2: @@ -129,9 +131,16 @@ if $data03 != 2 then goto a2 endi +sql show d4.vgroups +print online vnodes $data03 +if $data03 != 2 then + goto a2 +endi + sql alter database d1 replica 1 sql alter database d2 replica 1 sql alter database d3 replica 1 +sql alter database d4 replica 1 $x = 0 a1: @@ -159,6 +168,27 @@ if $data03 != 1 then goto a1 endi +sql show d4.vgroups +print online vnodes $data03 +if $data03 != 1 then + goto a1 +endi + +sql show dnodes +print $data00 $data01 $data02 $data03 +print $data10 $data11 $data12 $data13 +print $data20 $data21 $data22 $data23 + +if $data02 != 0 then + goto a1 +endi +if $data12 != 2 then + goto a1 +endi +if $data22 != 2 then + goto a1 +endi + print ========= step3 sql reset query cache sleep 100 @@ -192,6 +222,7 @@ print ========= step4 alter db sql alter database d1 replica 2 sql alter database d2 replica 2 sql alter database d3 replica 2 +sql alter database d4 replica 2 $x = 0 step4: @@ -219,6 +250,12 @@ if $data03 != 2 then goto step4 endi +sql show d4.vgroups +print online vnodes $data03 +if $data03 != 2 then + goto step4 +endi + sql insert into d1.t1 values(now, 3) sql insert into d2.t2 values(now, 3) sql insert into d3.t3 values(now, 3) @@ -249,27 +286,27 @@ system sh/exec.sh -n dnode2 -s stop -x SIGINT sql reset query cache sleep 100 -sql insert into d1.t1 values(now, 4) -x step1 -step1: -sql insert into d2.t2 values(now, 4) -x step2 -step2: -sql insert into d3.t3 values(now, 4) -x step3 -step3: -sql insert into d4.t4 values(now, 4) -x step4 -step4: +#sql insert into d1.t1 values(now, 4) -x step1 +#step1: +#sql insert into d2.t2 values(now, 4) -x step2 +#step2: +#sql insert into d3.t3 values(now, 4) -x step3 +#step3: +#sql insert into d4.t4 values(now, 4) -x step4 +#step4: print ========= step5 system sh/exec.sh -n dnode2 -s start system sh/exec.sh -n dnode3 -s stop -x SIGINT -sql insert into d1.t1 values(now, 5) -x step5 -step5: -sql insert into d2.t2 values(now, 5) -x step6 -step6: -sql insert into d3.t3 values(now, 5) -x step7 -step7: -sql insert into d4.t4 values(now, 5) -x step8 -step8: +#sql insert into d1.t1 values(now, 5) -x step5 +#step5: +#sql insert into d2.t2 values(now, 5) -x step6 +#step6: +#sql insert into d3.t3 values(now, 5) -x step7 +#step7: +#sql insert into d4.t4 values(now, 5) -x step8 +#step8: print ========= step6 system sh/exec.sh -n dnode3 -s start @@ -286,4 +323,5 @@ sql select * from d4.t4 system sh/exec.sh -n dnode1 -s stop -x SIGINT system sh/exec.sh -n dnode2 -s stop -x SIGINT -system sh/exec.sh -n dnode3 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode3 
-s stop -x SIGINT +system sh/exec.sh -n dnode4 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/dnode/datatrans_1node.sim b/tests/script/unique/dnode/datatrans_1node.sim new file mode 100644 index 0000000000000000000000000000000000000000..bc38bfaf2df172dfb752eaf94549995ed513ba74 --- /dev/null +++ b/tests/script/unique/dnode/datatrans_1node.sim @@ -0,0 +1,53 @@ + +system sh/stop_dnodes.sh + +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c wallevel -v 2 +system sh/exec.sh -n dnode1 -s start + +sleep 3000 +sql connect + +print =============== step1 +sql drop database -x step1 +step1: +sql create database db +sql use db +sql create table m1 (ts timestamp, speed int) + +print =============== step 2 +$x = 0 +while $x < 10 + $cc = $x * 60000 + $ms = 1601481600000 + $cc + + sql insert into m1 values ($ms , $x ) + $x = $x + 1 +endw + +sql select * from m1 + +print $rows points data are retrieved +if $rows != 10 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT + +print =============== step 3 +system sh/move_dnode.sh dnode1 dnode2 +system sh/exec.sh -n dnode2 -s start + + +print =============== step 4 +sleep 3000 +sql connect + +sql select * from db.m1 + +print $rows points data are retrieved +if $rows != 10 then + return -1 +endi + +system sh/exec.sh -n dnode2 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/dnode/datatrans_3node.sim b/tests/script/unique/dnode/datatrans_3node.sim new file mode 100644 index 0000000000000000000000000000000000000000..7c3708c1110df2714db25a59b6a5bc37b5f3818c --- /dev/null +++ b/tests/script/unique/dnode/datatrans_3node.sim @@ -0,0 +1,91 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/deploy.sh -n dnode2 -i 2 +system sh/deploy.sh -n dnode3 -i 3 + + +system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1 +system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1 +system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1 + + +system sh/cfg.sh -n dnode1 -c walLevel -v 2 +system sh/cfg.sh -n dnode2 -c walLevel -v 2 +system sh/cfg.sh -n dnode3 -c walLevel -v 2 + +system sh/cfg.sh -n dnode1 -c balanceInterval -v 10 +system sh/cfg.sh -n dnode2 -c balanceInterval -v 10 +system sh/cfg.sh -n dnode3 -c balanceInterval -v 10 + +system sh/cfg.sh -n dnode1 -c role -v 2 +system sh/cfg.sh -n dnode2 -c role -v 2 +system sh/cfg.sh -n dnode3 -c role -v 2 + + +system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 +system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 +system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4 + + + + +print ============== step1: start dnode1 +system sh/exec.sh -n dnode1 -s start +sleep 3000 +sql connect + +print ============== step2: start dnode2/dnode3 and add into cluster , then create database with replica 2, and create table, insert data +system sh/exec.sh -n dnode2 -s start +system sh/exec.sh -n dnode3 -s start +sql create dnode $hostname2 +sql create dnode $hostname3 +sleep 3000 + +# create table +sql drop database -x step1 +step1: +sql create database db +sql use db +sql create table m1 (ts timestamp, speed int) + +# insert data +$x = 0 +while $x < 10 + $cc = $x * 60000 + $ms = 1601481600000 + $cc + + sql insert into m1 values ($ms , $x ) + $x = $x + 1 +endw + +sql select * from m1 + +print $rows points data are retrieved +if $rows != 10 then + return -1 +endi + +print ============== step3: stop cluster , then move_dnode1 ,start cluster +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode2 -s stop -x SIGINT +system sh/exec.sh -n dnode3 -s 
stop -x SIGINT + +system sh/move_dnode.sh dnode1 dnode4 +system sh/exec.sh -n dnode2 -s start +system sh/exec.sh -n dnode3 -s start +system sh/exec.sh -n dnode4 -s start + +print =============== step 4 +sleep 3000 +sql connect + +sql select * from db.m1 + +print $rows points data are retrieved +if $rows != 10 then + return -1 +endi + +system sh/exec.sh -n dnode2 -s stop -x SIGINT +system sh/exec.sh -n dnode3 -s stop -x SIGINT +system sh/exec.sh -n dnode4 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/dnode/datatrans_3node_2.sim b/tests/script/unique/dnode/datatrans_3node_2.sim new file mode 100644 index 0000000000000000000000000000000000000000..4fb3b4535fb640ac7257af33b651941462a56896 --- /dev/null +++ b/tests/script/unique/dnode/datatrans_3node_2.sim @@ -0,0 +1,91 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/deploy.sh -n dnode2 -i 2 +system sh/deploy.sh -n dnode3 -i 3 + + +system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1 +system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1 +system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1 + + +system sh/cfg.sh -n dnode1 -c walLevel -v 2 +system sh/cfg.sh -n dnode2 -c walLevel -v 2 +system sh/cfg.sh -n dnode3 -c walLevel -v 2 + +system sh/cfg.sh -n dnode1 -c balanceInterval -v 10 +system sh/cfg.sh -n dnode2 -c balanceInterval -v 10 +system sh/cfg.sh -n dnode3 -c balanceInterval -v 10 + +system sh/cfg.sh -n dnode1 -c role -v 2 +system sh/cfg.sh -n dnode2 -c role -v 2 +system sh/cfg.sh -n dnode3 -c role -v 2 + + +system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 +system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 +system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4 + + + + +print ============== step1: start dnode1 +system sh/exec.sh -n dnode1 -s start +sleep 3000 +sql connect + +print ============== step2: start dnode2/dnode3 and add into cluster , then create database with replica 2, and create table, insert data +system sh/exec.sh -n dnode2 -s start +system sh/exec.sh -n dnode3 -s start +sql create dnode $hostname2 +sql create dnode $hostname3 +sleep 3000 + +# create table +sql drop database -x step1 +step1: +sql create database db replica 2 +sql use db +sql create table m1 (ts timestamp, speed int) + +# insert data +$x = 0 +while $x < 10 + $cc = $x * 60000 + $ms = 1601481600000 + $cc + + sql insert into m1 values ($ms , $x ) + $x = $x + 1 +endw + +sql select * from m1 + +print $rows points data are retrieved +if $rows != 10 then + return -1 +endi + +print ============== step3: stop cluster , then move_dnode1 ,start cluster +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode2 -s stop -x SIGINT +system sh/exec.sh -n dnode3 -s stop -x SIGINT + +system sh/move_dnode.sh dnode1 dnode4 +system sh/exec.sh -n dnode2 -s start +system sh/exec.sh -n dnode3 -s start +system sh/exec.sh -n dnode4 -s start + +print =============== step 4 +sleep 3000 +sql connect + +sql select * from db.m1 + +print $rows points data are retrieved +if $rows != 10 then + return -1 +endi + +system sh/exec.sh -n dnode2 -s stop -x SIGINT +system sh/exec.sh -n dnode3 -s stop -x SIGINT +system sh/exec.sh -n dnode4 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/test-all.sh b/tests/test-all.sh index 19d78032554682d533c89bc4e4bd473689364d87..0c1f55f5f019b102fb35ad800de7627a72cc2d9a 100755 --- a/tests/test-all.sh +++ b/tests/test-all.sh @@ -138,6 +138,7 @@ if [ "$2" != "python" ]; then elif [ "$1" == "b1" ]; then echo "### run TSIM b1 test ###" runSimCaseOneByOne jenkins/basic_1.txt + 
+    runSimCaseOneByOne jenkins/basic_4.txt
   elif [ "$1" == "b2" ]; then
     echo "### run TSIM b2 test ###"
     runSimCaseOneByOne jenkins/basic_2.txt
@@ -153,6 +154,9 @@
   elif [ "$1" == "b3fq" ]; then
     echo "### run TSIM b3 test ###"
     runSimCaseOneByOnefq jenkins/basic_3.txt
+  elif [ "$1" == "b4fq" ]; then
+    echo "### run TSIM b4 test ###"
+    runSimCaseOneByOnefq jenkins/basic_4.txt
   elif [ "$1" == "smoke" ] || [ -z "$1" ]; then
     echo "### run TSIM smoke test ###"
     runSimCaseOneByOne basicSuite.sim
diff --git a/tests/tsim/src/simExe.c b/tests/tsim/src/simExe.c
index 2db750cdd302c63522e008e3ea324230dd87b6e5..0323f6ca687c40c8bc70b0672abe4e2f7852a8cc 100644
--- a/tests/tsim/src/simExe.c
+++ b/tests/tsim/src/simExe.c
@@ -292,6 +292,9 @@ bool simExecuteRunBackCmd(SScript *script, char *option) {
   if (pthread_create(&newScript->bgPid, NULL, simExecuteScript, (void *)newScript) != 0) {
     sprintf(script->error, "lineNum:%d. create background thread failed", script->lines[script->linePos].lineNum);
     return false;
+  } else {
+    simDebug("script:%s, background thread:0x%08" PRIx64 " is created", newScript->fileName,
+             taosGetPthreadId(newScript->bgPid));
   }
 
   script->linePos++;
@@ -448,7 +451,6 @@ void simCloseNativeConnect(SScript *script) {
 
   simDebug("script:%s, taos:%p closed", script->fileName, script->taos);
   taos_close(script->taos);
-  taosMsleep(1200);
 
   script->taos = NULL;
 }
diff --git a/tests/tsim/src/simMain.c b/tests/tsim/src/simMain.c
index 33fd24dd5823b8ec7bf818cec7c36ddac783bc24..8f13254f68165a9e26ecac2406538f8cba905120 100644
--- a/tests/tsim/src/simMain.c
+++ b/tests/tsim/src/simMain.c
@@ -40,14 +40,14 @@ int32_t main(int32_t argc, char *argv[]) {
       printf("usage: %s [options] \n", argv[0]);
       printf("   [-c config]: config directory, default is: %s\n", configDir);
       printf("   [-f script]: script filename\n");
-      exit(0);
+      return 0;
     }
   }
 
   if (!simSystemInit()) {
     simError("failed to initialize the system");
     simSystemCleanUp();
-    exit(1);
+    return -1;
   }
 
   simInfo("simulator is running ...");
@@ -56,7 +56,7 @@
   SScript *script = simParseScript(scriptFile);
   if (script == NULL) {
     simError("parse script file:%s failed", scriptFile);
-    exit(-1);
+    return -1;
   }
 
   simScriptList[++simScriptPos] = script;
diff --git a/tests/tsim/src/simSystem.c b/tests/tsim/src/simSystem.c
index 693ade7b35a095b499198db9dcc27335f73e14bd..bf47c56718b7377aa5cd4fd48269eec0fc9e0b38 100644
--- a/tests/tsim/src/simSystem.c
+++ b/tests/tsim/src/simSystem.c
@@ -93,27 +93,34 @@ void simFreeScript(SScript *script) {
     for (int32_t i = 0; i < script->bgScriptLen; ++i) {
       SScript *bgScript = script->bgScripts[i];
 
-      simInfo("script:%s, set stop flag", script->fileName);
+      simDebug("script:%s, is background script, set stop flag", bgScript->fileName);
       bgScript->killed = true;
       if (taosCheckPthreadValid(bgScript->bgPid)) {
         pthread_join(bgScript->bgPid, NULL);
       }
+
+      simDebug("script:%s, background thread joined", bgScript->fileName);
+      taos_close(bgScript->taos);
+      tfree(bgScript->lines);
+      tfree(bgScript->optionBuffer);
+      tfree(bgScript);
     }
-  }
 
-  simDebug("script:%s, is freed", script->fileName);
-  taos_close(script->taos);
-  tfree(script->lines);
-  tfree(script->optionBuffer);
-  tfree(script);
+    simDebug("script:%s, is cleaned", script->fileName);
+    taos_close(script->taos);
+    tfree(script->lines);
+    tfree(script->optionBuffer);
+    tfree(script);
+  }
 }
 
 SScript *simProcessCallOver(SScript *script) {
   if (script->type == SIM_SCRIPT_TYPE_MAIN) {
+    simDebug("script:%s, is main script, set stop flag", script->fileName);
     if (script->killed) {
       simInfo("script:" FAILED_PREFIX "%s" FAILED_POSTFIX ", " FAILED_PREFIX "failed" FAILED_POSTFIX ", error:%s",
               script->fileName, script->error);
-      exit(-1);
+      return NULL;
     } else {
       simInfo("script:" SUCCESS_PREFIX "%s" SUCCESS_POSTFIX ", " SUCCESS_PREFIX "success" SUCCESS_POSTFIX,
               script->fileName);
@@ -125,13 +132,13 @@ SScript *simProcessCallOver(SScript *script) {
       if (simScriptPos == -1) {
         simInfo("----------------------------------------------------------------------");
         simInfo("Simulation Test Done, " SUCCESS_PREFIX "%d" SUCCESS_POSTFIX " Passed:\n", simScriptSucced);
-        exit(0);
+        return NULL;
       }
 
       return simScriptList[simScriptPos];
     }
   } else {
-    simInfo("script:%s, is stopped by main script", script->fileName);
+    simDebug("script:%s, is stopped", script->fileName);
     simFreeScript(script);
     return NULL;
   }
@@ -161,5 +168,6 @@ void *simExecuteScript(void *inputScript) {
     }
   }
 
+  simInfo("thread is stopped");
   return NULL;
 }