diff --git a/cmake/install.inc b/cmake/install.inc
index fced6389660c22028e604da24efe54e0503ad0da..e9ad240a793b9736edbe5769c6af12276e13a1a6 100755
--- a/cmake/install.inc
+++ b/cmake/install.inc
@@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS)
#INSTALL(TARGETS taos RUNTIME DESTINATION driver)
#INSTALL(TARGETS shell RUNTIME DESTINATION .)
IF (TD_MVN_INSTALLED)
- INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-*-dist.jar DESTINATION connector/jdbc)
+ INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.34-dist.jar DESTINATION connector/jdbc)
ENDIF ()
ELSEIF (TD_DARWIN)
SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")
diff --git a/cmake/version.inc b/cmake/version.inc
index 7c646c67b65b6f52542cc155938f5396c589ae7a..7c0a824c9c39c6760b3e2408d969048983430811 100755
--- a/cmake/version.inc
+++ b/cmake/version.inc
@@ -4,13 +4,13 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
- SET(TD_VER_NUMBER "2.1.4.1")
+ SET(TD_VER_NUMBER "2.1.5.0")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
SET(TD_VER_COMPATIBLE ${VERCOMPATIBLE})
ELSE ()
- SET(TD_VER_COMPATIBLE "1.0.0.0")
+ SET(TD_VER_COMPATIBLE "2.0.0.0")
ENDIF ()
find_program(HAVE_GIT NAMES git)
diff --git a/documentation20/cn/02.getting-started/docs.md b/documentation20/cn/02.getting-started/docs.md
index c67b9e61b97c982b143eb1f5957e59f181f477f3..ab10b28fd3950bfa10e47113696de0829b2da74d 100644
--- a/documentation20/cn/02.getting-started/docs.md
+++ b/documentation20/cn/02.getting-started/docs.md
@@ -179,17 +179,15 @@ taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s);
| | **CentOS 6/7/8** | **Ubuntu 16/18/20** | **Other Linux** | **统信 UOS** | **银河/中标麒麟** | **凝思 V60/V80** | **华为 EulerOS** |
| -------------- | --------------------- | ------------------------ | --------------- | --------------- | ------------------------- | --------------------- | --------------------- |
| X64 | ● | ● | | ○ | ● | ● | ● |
-| 树莓派 ARM32 | | ● | ● | | | | |
| 龙芯 MIPS64 | | | ● | | | | |
-| 鲲鹏 ARM64 | | ○ | ○ | | ● | | |
-| 申威 Alpha64 | | | ○ | ● | | | |
+| 鲲鹏 ARM64 | | ○ | ○ | | ● | | |
+| 申威 Alpha64 | | | ○ | ● | | | |
| 飞腾 ARM64 | | ○ 优麒麟 | | | | | |
| 海光 X64 | ● | ● | ● | ○ | ● | ● | |
-| 瑞芯微 ARM64/32 | | | ○ | | | | |
-| 全志 ARM64/32 | | | ○ | | | | |
-| 炬力 ARM64/32 | | | ○ | | | | |
-| TI ARM32 | | | ○ | | | | |
-| 华为云 ARM64 | | | | | | | ● |
+| 瑞芯微 ARM64 | | | ○ | | | | |
+| 全志 ARM64 | | | ○ | | | | |
+| 炬力 ARM64 | | | ○ | | | | |
+| 华为云 ARM64 | | | | | | | ● |
注: ● 表示经过官方测试验证, ○ 表示非官方测试验证。
diff --git a/documentation20/cn/03.architecture/02.replica/docs.md b/documentation20/cn/03.architecture/02.replica/docs.md
index 59192ee0cc1fdeb130e2f541b424af284fbc916a..27ac7f123cdd2a56df9e65ae0fa13d1ff8faa23d 100644
--- a/documentation20/cn/03.architecture/02.replica/docs.md
+++ b/documentation20/cn/03.architecture/02.replica/docs.md
@@ -111,7 +111,7 @@ TDengine采取的是Master-Slave模式进行同步,与流行的RAFT一致性
2. 应用将打上版本号的写请求封装一个WAL Head, 写入WAL(Write Ahead Log)
3. 应用调用API syncForwardToPeer,如果vnode B是slave状态,sync模块将包含WAL Head的数据包通过Forward消息发送给vnode B,否则就不转发。
4. vnode B收到Forward消息后,调用回调函数writeToCache, 交给应用处理
-5. vnode B应用在写入成功后,都需要调用syncAckForward通知sync模块已经写入成功。
+5. vnode B应用在写入成功后,都需要调用syncConfirmForward通知sync模块已经写入成功。
6. 如果quorum大于1,vnode B需要等待应用的回复确认,收到确认后,vnode B发送Forward Response消息给node A。
7. 如果quorum大于1,vnode A需要等待vnode B或其他副本对Forward消息的确认。
8. 如果quorum大于1,vnode A收到quorum-1条确认消息后,调用回调函数confirmForward,通知应用写入成功。
@@ -140,7 +140,7 @@ TDengine采取的是Master-Slave模式进行同步,与流行的RAFT一致性
整个数据恢复流程分为两大步骤,第一步,先恢复archived data(file), 然后恢复wal。具体流程如下:
-
+
1. 通过已经建立的TCP连接,发送sync req给master节点
2. master收到sync req后,以client的身份,向vnode B主动建立一新的专用于同步的TCP连接(syncFd)
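
The Forward / confirm handshake in steps 4-8 above reduces to counting acknowledgements against the configured quorum. Below is a minimal, hypothetical C sketch of that bookkeeping only; the type and function names are illustrative and not the actual sync-module API, which the text refers to via syncForwardToPeer, writeToCache, syncConfirmForward and confirmForward.

    /* Hypothetical sketch of the quorum bookkeeping in steps 6-8; names are illustrative. */
    #include <stdint.h>

    typedef struct {
      uint64_t version;                                 /* version assigned in step 1 */
      int      quorum;                                  /* required confirmations, master included */
      int      confirms;                                /* confirmations so far (starts at 1: local write) */
      void   (*confirmForward)(uint64_t ver, int code); /* application callback, step 8 */
    } SPendingWrite;

    /* called on the master when a Forward Response for this write arrives */
    static void onForwardResponse(SPendingWrite *p, int code) {
      if (code != 0) {                  /* a replica failed: surface the error */
        p->confirmForward(p->version, code);
        return;
      }
      if (++p->confirms >= p->quorum) { /* quorum-1 replica acks plus the local write */
        p->confirmForward(p->version, 0);
      }
    }
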
diff --git a/documentation20/cn/03.architecture/docs.md b/documentation20/cn/03.architecture/docs.md
index d22855198abc804d6cee5603b56537c9315d610e..b481bea9f840ad459812f955aa76a8a7829d5b37 100644
--- a/documentation20/cn/03.architecture/docs.md
+++ b/documentation20/cn/03.architecture/docs.md
@@ -323,8 +323,6 @@ Vnode会保持一个数据版本号(Version),对内存数据进行持久化存
采用同步复制,系统的性能会有所下降,而且latency会增加。因为元数据要强一致,mnode之间的数据同步缺省就是采用的同步复制。
-注:vnode之间的同步复制仅仅企业版支持
-
## 缓存与持久化
### 缓存
diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md
index 73fa5b34e5620a7b56e1edd35c6705bf47c5d306..6d39c255652b9c04a0f7c397d3364b78b9efc953 100644
--- a/documentation20/cn/12.taos-sql/docs.md
+++ b/documentation20/cn/12.taos-sql/docs.md
@@ -48,7 +48,7 @@ TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传
| 3 | BIGINT | 8 | 长整型,范围 [-2^63+1, 2^63-1], -2^63 用于 NULL |
| 4 | FLOAT | 4 | 浮点型,有效位数 6-7,范围 [-3.4E38, 3.4E38] |
| 5 | DOUBLE | 8 | 双精度浮点型,有效位数 15-16,范围 [-1.7E308, 1.7E308] |
-| 6 | BINARY | 自定义 | 记录单字节字符串,建议只用于处理 ASCII 可见字符,中文等多字节字符需使用 nchar。理论上,最长可以有 16374 字节,但由于每行数据最多 16K 字节,实际上限一般小于理论值。binary 仅支持字符串输入,字符串两端需使用单引号引用。使用时须指定大小,如 binary(20) 定义了最长为 20 个单字节字符的字符串,每个字符占 1 byte 的存储空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\’`。 |
+| 6 | BINARY | 自定义 | 记录单字节字符串,建议只用于处理 ASCII 可见字符,中文等多字节字符需使用 nchar。理论上,最长可以有 16374 字节,但由于每行数据最多 16K 字节,实际上限一般小于理论值。binary 仅支持字符串输入,字符串两端需使用单引号引用。使用时须指定大小,如 binary(20) 定义了最长为 20 个单字节字符的字符串,每个字符占 1 byte 的存储空间,总共固定占用 20 bytes 的空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\’`。 |
| 7 | SMALLINT | 2 | 短整型, 范围 [-32767, 32767], -32768 用于 NULL |
| 8 | TINYINT | 1 | 单字节整型,范围 [-127, 127], -128 用于 NULL |
| 9 | BOOL | 1 | 布尔型,{true, false} |
@@ -208,7 +208,7 @@ TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传
说明:可在like中使用通配符进行名称的匹配,这一通配符字符串最长不能超过24字节。
- 通配符匹配:1)'%'(百分号)匹配0到任意个字符;2)'\_'下划线匹配单个任意字符。(如果希望匹配表名中带有的下划线,那么这里可以用反斜线进行转义,也就是说 '\\\_' 会被用于匹配表名中原始带有的下划线符号)
+ 通配符匹配:1)'%'(百分号)匹配0到任意个字符;2)'\_'下划线匹配单个任意字符。
- **显示一个数据表的创建语句**
@@ -715,7 +715,7 @@ Query OK, 1 row(s) in set (0.001091s)
2. 同时进行多个字段的范围过滤,需要使用关键词 AND 来连接不同的查询条件,暂不支持 OR 连接的不同列之间的查询过滤条件。
3. 针对单一字段的过滤,如果是时间过滤条件,则一条语句中只支持设定一个;但针对其他的(普通)列或标签列,则可以使用 `OR` 关键字进行组合条件的查询过滤。例如: `((value > 20 AND value < 30) OR (value < 12))`。
4. 从 2.0.17.0 版本开始,条件过滤开始支持 BETWEEN AND 语法,例如 `WHERE col2 BETWEEN 1.5 AND 3.25` 表示查询条件为“1.5 ≤ col2 ≤ 3.25”。
-5. 从 2.1.4.0 版本开始,条件过滤开始支持 IN 算子,例如 `WHERE city IN ('Beijing', 'Shanghai')`。说明:BOOL 类型写作 `{true, false}` 或 `{0, 1}` 均可,但不能写作 0、1 之外的整数;FLOAT 和 DOUBLE 类型会受到浮点数精度影响,集合内的值在精度范围内认为和数据行的值完全相等才能匹配成功。
+5. 从 2.1.4.0 版本开始,条件过滤开始支持 IN 算子,例如 `WHERE city IN ('Beijing', 'Shanghai')`。说明:BOOL 类型写作 `{true, false}` 或 `{0, 1}` 均可,但不能写作 0、1 之外的整数;FLOAT 和 DOUBLE 类型会受到浮点数精度影响,集合内的值在精度范围内认为和数据行的值完全相等才能匹配成功;TIMESTAMP 类型支持非主键的列。
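
For reference, a minimal C-client sketch of the BETWEEN (item 4) and IN (item 5) filters described above; the super table name meters is an assumption, while the city and col2 columns follow the examples in the text.

    #include <stdio.h>
    #include <taos.h>

    int main(void) {
      TAOS *conn = taos_connect("localhost", "root", "taosdata", "test", 0);
      if (conn == NULL) return 1;

      /* BETWEEN on a numeric column (item 4) and IN on a tag column (item 5) */
      TAOS_RES *res = taos_query(conn,
          "select * from meters where col2 between 1.5 and 3.25"
          " and city in ('Beijing', 'Shanghai')");
      if (taos_errno(res) != 0) {
        fprintf(stderr, "query failed: %s\n", taos_errstr(res));
      }
      taos_free_result(res);
      taos_close(conn);
      return 0;
    }
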
diff --git a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h
--- a/src/common/inc/tdataformat.h
+++ b/src/common/inc/tdataformat.h
@@ -366,6 +443,7 @@ typedef struct {
#define kvRowColIdxAt(r, i) (kvRowColIdx(r) + (i))
#define kvRowFree(r) tfree(r)
#define kvRowEnd(r) POINTER_SHIFT(r, kvRowLen(r))
+#define kvRowValLen(r) (kvRowLen(r) - TD_KV_ROW_HEAD_SIZE - sizeof(SColIdx) * kvRowNCols(r))
#define kvRowTKey(r) (*(TKEY *)(kvRowValues(r)))
#define kvRowKey(r) tdGetKey(kvRowTKey(r))
#define kvRowDeleted(r) TKEY_IS_DELETED(kvRowTKey(r))
@@ -397,9 +475,9 @@ static FORCE_INLINE void *tdGetKVRowIdxOfCol(SKVRow row, int16_t colId) {
}
// offset here not include kvRow header length
-static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, int16_t colId, int8_t type, int32_t offset) {
+static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, int16_t colId, int8_t type, int32_t *offset) {
ASSERT(value != NULL);
- int32_t toffset = offset + TD_KV_ROW_HEAD_SIZE;
+ int32_t toffset = *offset + TD_KV_ROW_HEAD_SIZE;
SColIdx *pColIdx = (SColIdx *)POINTER_SHIFT(row, toffset);
char * ptr = (char *)POINTER_SHIFT(row, kvRowLen(row));
@@ -410,7 +488,7 @@ static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, int16_t
memcpy(ptr, value, varDataTLen(value));
kvRowLen(row) += varDataTLen(value);
} else {
- if (offset == 0) {
+ if (*offset == 0) {
ASSERT(type == TSDB_DATA_TYPE_TIMESTAMP);
TKEY tvalue = tdGetTKEY(*(TSKEY *)value);
memcpy(ptr, (void *)(&tvalue), TYPE_BYTES[type]);
@@ -419,9 +497,27 @@ static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, int16_t
}
kvRowLen(row) += TYPE_BYTES[type];
}
+ *offset += sizeof(SColIdx);
return 0;
}
+// NOTE: offset here including the header size
+static FORCE_INLINE void *tdGetKvRowDataOfCol(void *row, int32_t offset) { return POINTER_SHIFT(row, offset); }
+
+static FORCE_INLINE void *tdGetKVRowValOfColEx(SKVRow row, int16_t colId, int32_t *nIdx) {
+ while (*nIdx < kvRowNCols(row)) {
+ SColIdx *pColIdx = kvRowColIdxAt(row, *nIdx);
+ if (pColIdx->colId == colId) {
+ ++(*nIdx);
+ return tdGetKvRowDataOfCol(row, pColIdx->offset);
+ } else if (pColIdx->colId > colId) {
+ return NULL;
+ } else {
+ ++(*nIdx);
+ }
+ }
+ return NULL;
+}
// ----------------- K-V data row builder
typedef struct {
@@ -494,7 +590,7 @@ typedef void *SMemRow;
#define TD_MEM_ROW_KV_VER_SIZE sizeof(int16_t)
#define TD_MEM_ROW_KV_TYPE_VER_SIZE (TD_MEM_ROW_TYPE_SIZE + TD_MEM_ROW_KV_VER_SIZE)
#define TD_MEM_ROW_DATA_HEAD_SIZE (TD_MEM_ROW_TYPE_SIZE + TD_DATA_ROW_HEAD_SIZE)
-// #define TD_MEM_ROW_KV_HEAD_SIZE (TD_MEM_ROW_TYPE_SIZE + TD_MEM_ROW_KV_VER_SIZE + TD_KV_ROW_HEAD_SIZE)
+#define TD_MEM_ROW_KV_HEAD_SIZE (TD_MEM_ROW_TYPE_SIZE + TD_MEM_ROW_KV_VER_SIZE + TD_KV_ROW_HEAD_SIZE)
#define SMEM_ROW_DATA 0U // SDataRow
#define SMEM_ROW_KV 1U // SKVRow
@@ -537,27 +633,80 @@ typedef void *SMemRow;
#define memRowSetType(r, t) (memRowType(r) = (t))
#define memRowSetLen(r, l) (isDataRow(r) ? memRowDataLen(r) = (l) : memRowKvLen(r) = (l))
-#define memRowSetVersion(r, v) (isDataRow(r) ? dataRowSetVersion(memRowDataBody(r), v) : memRowKvSetVersion(r, v))
+#define memRowSetVersion(r, v) (isDataRow(r) ? dataRowSetVersion(memRowDataBody(r), v) : memRowSetKvVersion(r, v))
#define memRowCpy(dst, r) memcpy((dst), (r), memRowTLen(r))
#define memRowMaxBytesFromSchema(s) (schemaTLen(s) + TD_MEM_ROW_DATA_HEAD_SIZE)
#define memRowDeleted(r) TKEY_IS_DELETED(memRowTKey(r))
SMemRow tdMemRowDup(SMemRow row);
-void tdAppendMemRowToDataCol(SMemRow row, STSchema *pSchema, SDataCols *pCols);
-// NOTE: offset here including the header size
-static FORCE_INLINE void *tdGetKvRowDataOfCol(void *row, int32_t offset) { return POINTER_SHIFT(row, offset); }
+void tdAppendMemRowToDataCol(SMemRow row, STSchema *pSchema, SDataCols *pCols, bool forceSetNull);
+
// NOTE: offset here including the header size
-static FORCE_INLINE void *tdGetMemRowDataOfCol(void *row, int8_t type, int32_t offset) {
+static FORCE_INLINE void *tdGetMemRowDataOfCol(void *row, int16_t colId, int8_t colType, uint16_t offset) {
if (isDataRow(row)) {
- return tdGetRowDataOfCol(row, type, offset);
- } else if (isKvRow(row)) {
- return tdGetKvRowDataOfCol(row, offset);
+ return tdGetRowDataOfCol(memRowDataBody(row), colType, offset);
} else {
- ASSERT(0);
+ return tdGetKVRowValOfCol(memRowKvBody(row), colId);
}
- return NULL;
}
+/**
+ * NOTE:
+ * 1. Applicable to scan columns one by one
+ * 2. offset here including the header size
+ */
+static FORCE_INLINE void *tdGetMemRowDataOfColEx(void *row, int16_t colId, int8_t colType, int32_t offset,
+ int32_t *kvNIdx) {
+ if (isDataRow(row)) {
+ return tdGetRowDataOfCol(memRowDataBody(row), colType, offset);
+ } else {
+ return tdGetKVRowValOfColEx(memRowKvBody(row), colId, kvNIdx);
+ }
+}
+
+static FORCE_INLINE int tdAppendMemColVal(SMemRow row, const void *value, int16_t colId, int8_t type, int32_t offset,
+ int32_t *kvOffset) {
+ if (isDataRow(row)) {
+ tdAppendColVal(memRowDataBody(row), value, type, offset);
+ } else {
+ tdAppendKvColVal(memRowKvBody(row), value, colId, type, kvOffset);
+ }
+ return 0;
+}
+
+// make sure schema->flen appended for SDataRow
+static FORCE_INLINE int32_t tdGetColAppendLen(uint8_t rowType, const void *value, int8_t colType) {
+ int32_t len = 0;
+ if (IS_VAR_DATA_TYPE(colType)) {
+ len += varDataTLen(value);
+ if (rowType == SMEM_ROW_KV) {
+ len += sizeof(SColIdx);
+ }
+ } else {
+ if (rowType == SMEM_ROW_KV) {
+ len += TYPE_BYTES[colType];
+ len += sizeof(SColIdx);
+ }
+ }
+ return len;
+}
+
+
+typedef struct {
+ int16_t colId;
+ uint8_t colType;
+ char* colVal;
+} SColInfo;
+
+static FORCE_INLINE void setSColInfo(SColInfo* colInfo, int16_t colId, uint8_t colType, char* colVal) {
+ colInfo->colId = colId;
+ colInfo->colType = colType;
+ colInfo->colVal = colVal;
+}
+
+SMemRow mergeTwoMemRows(void *buffer, SMemRow row1, SMemRow row2, STSchema *pSchema1, STSchema *pSchema2);
+
+
// ----------------- Raw payload structure for row:
/* |<------------ Head ------------->|<----------- body of column data tuple ------------------->|
* | |<----------------- flen ------------->|<--- value part --->|
@@ -607,4 +756,4 @@ static FORCE_INLINE char *payloadNextCol(char *pCol) { return (char *)POINTER_SH
}
#endif
-#endif // _TD_DATA_FORMAT_H_
\ No newline at end of file
+#endif // _TD_DATA_FORMAT_H_
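
The new tdGetKVRowValOfColEx() (and the tdGetMemRowDataOfColEx() wrapper) is built for the column-by-column scan its NOTE mentions: the caller walks the schema in ascending colId order and keeps a single cursor into the row's SColIdx array, so each row is scanned once instead of searched per column. A minimal sketch of that calling pattern, assuming row and pSchema are a valid SKVRow and its matching STSchema:

    #include "tdataformat.h"

    static void scanKvRow(SKVRow row, STSchema *pSchema) {
      int32_t kvIdx = 0;                          /* cursor shared across all columns */
      for (int i = 0; i < schemaNCols(pSchema); ++i) {
        STColumn *pCol = schemaColAt(pSchema, i);
        void     *val  = tdGetKVRowValOfColEx(row, pCol->colId, &kvIdx);
        if (val == NULL) continue;                /* column absent in this KV row: treat as NULL */
        /* ... consume val according to pCol->type ... */
      }
    }
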
diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h
index 30bbaaae7390748a5fe791ed391b069599b96a3a..7290db6ec978b85ad71ea744b7ea37c6488cb9fa 100644
--- a/src/common/inc/tglobal.h
+++ b/src/common/inc/tglobal.h
@@ -210,8 +210,8 @@ extern int32_t debugFlag;
extern char lossyColumns[];
extern double fPrecision;
extern double dPrecision;
-extern uint32_t maxIntervals;
-extern uint32_t intervals;
+extern uint32_t maxRange;
+extern uint32_t curRange;
extern char Compressor[];
#endif
diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c
index 9f7432c90d5588aff29ae87575f01deb60c62ebc..8ef3d083c75c58381fc8a71f076e7e04e976d774 100644
--- a/src/common/src/tdataformat.c
+++ b/src/common/src/tdataformat.c
@@ -17,9 +17,10 @@
#include "talgo.h"
#include "tcoding.h"
#include "wchar.h"
+#include "tarray.h"
static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, int limit1, SDataCols *src2, int *iter2,
- int limit2, int tRows);
+ int limit2, int tRows, bool forceSetNull);
/**
* Duplicate the schema and return a new object
@@ -418,7 +419,8 @@ void tdResetDataCols(SDataCols *pCols) {
}
}
}
-static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols) {
+
+static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols, bool forceSetNull) {
ASSERT(pCols->numOfRows == 0 || dataColsKeyLast(pCols) < dataRowKey(row));
int rcol = 0;
@@ -452,8 +454,10 @@ static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols
} else if (pRowCol->colId < pDataCol->colId) {
rcol++;
} else {
- // dataColSetNullAt(pDataCol, pCols->numOfRows);
- dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
+ if(forceSetNull) {
+ //dataColSetNullAt(pDataCol, pCols->numOfRows);
+ dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
+ }
dcol++;
}
}
@@ -461,7 +465,7 @@ static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols
pCols->numOfRows++;
}
-static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCols) {
+static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCols, bool forceSetNull) {
ASSERT(pCols->numOfRows == 0 || dataColsKeyLast(pCols) < kvRowKey(row));
int rcol = 0;
@@ -498,8 +502,10 @@ static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCo
} else if (colIdx->colId < pDataCol->colId) {
++rcol;
} else {
- // dataColSetNullAt(pDataCol, pCols->numOfRows);
- dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
+ if (forceSetNull) {
+ // dataColSetNullAt(pDataCol, pCols->numOfRows);
+ dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
+ }
++dcol;
}
}
@@ -507,17 +513,17 @@ static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCo
pCols->numOfRows++;
}
-void tdAppendMemRowToDataCol(SMemRow row, STSchema *pSchema, SDataCols *pCols) {
+void tdAppendMemRowToDataCol(SMemRow row, STSchema *pSchema, SDataCols *pCols, bool forceSetNull) {
if (isDataRow(row)) {
- tdAppendDataRowToDataCol(memRowDataBody(row), pSchema, pCols);
+ tdAppendDataRowToDataCol(memRowDataBody(row), pSchema, pCols, forceSetNull);
} else if (isKvRow(row)) {
- tdAppendKvRowToDataCol(memRowKvBody(row), pSchema, pCols);
+ tdAppendKvRowToDataCol(memRowKvBody(row), pSchema, pCols, forceSetNull);
} else {
ASSERT(0);
}
}
-int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *pOffset) {
+int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *pOffset, bool forceSetNull) {
ASSERT(rowsToMerge > 0 && rowsToMerge <= source->numOfRows);
ASSERT(target->numOfCols == source->numOfCols);
int offset = 0;
@@ -546,7 +552,7 @@ int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *
int iter1 = 0;
tdMergeTwoDataCols(target, pTarget, &iter1, pTarget->numOfRows, source, pOffset, source->numOfRows,
- pTarget->numOfRows + rowsToMerge);
+ pTarget->numOfRows + rowsToMerge, forceSetNull);
}
tdFreeDataCols(pTarget);
@@ -559,7 +565,7 @@ _err:
// src2 data has more priority than src1
static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, int limit1, SDataCols *src2, int *iter2,
- int limit2, int tRows) {
+ int limit2, int tRows, bool forceSetNull) {
tdResetDataCols(target);
ASSERT(limit1 <= src1->numOfRows && limit2 <= src2->numOfRows);
@@ -588,7 +594,7 @@ static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, i
if ((key1 > key2) || (key1 == key2 && !TKEY_IS_DELETED(tkey2))) {
for (int i = 0; i < src2->numOfCols; i++) {
ASSERT(target->cols[i].type == src2->cols[i].type);
- if (src2->cols[i].len > 0) {
+ if (src2->cols[i].len > 0 && (forceSetNull || (!forceSetNull && !isNull(src2->cols[i].pData, src2->cols[i].type)))) {
dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src2->cols + i, *iter2), target->numOfRows,
target->maxPoints);
}
@@ -634,42 +640,28 @@ int tdSetKVRowDataOfCol(SKVRow *orow, int16_t colId, int8_t type, void *value) {
SKVRow nrow = NULL;
void * ptr = taosbsearch(&colId, kvRowColIdx(row), kvRowNCols(row), sizeof(SColIdx), comparTagId, TD_GE);
- if (ptr == NULL || ((SColIdx *)ptr)->colId > colId) { // need to add a column value to the row
+ if (ptr == NULL || ((SColIdx *)ptr)->colId > colId) { // need to add a column value to the row
int diff = IS_VAR_DATA_TYPE(type) ? varDataTLen(value) : TYPE_BYTES[type];
- nrow = malloc(kvRowLen(row) + sizeof(SColIdx) + diff);
+ int nRowLen = kvRowLen(row) + sizeof(SColIdx) + diff;
+ int oRowCols = kvRowNCols(row);
+
+ ASSERT(diff > 0);
+ nrow = malloc(nRowLen);
if (nrow == NULL) return -1;
- kvRowSetLen(nrow, kvRowLen(row) + (uint16_t)sizeof(SColIdx) + diff);
- kvRowSetNCols(nrow, kvRowNCols(row) + 1);
+ kvRowSetLen(nrow, nRowLen);
+ kvRowSetNCols(nrow, oRowCols + 1);
- if (ptr == NULL) {
- memcpy(kvRowColIdx(nrow), kvRowColIdx(row), sizeof(SColIdx) * kvRowNCols(row));
- memcpy(kvRowValues(nrow), kvRowValues(row), POINTER_DISTANCE(kvRowEnd(row), kvRowValues(row)));
- int colIdx = kvRowNCols(nrow) - 1;
- kvRowColIdxAt(nrow, colIdx)->colId = colId;
- kvRowColIdxAt(nrow, colIdx)->offset = (int16_t)(POINTER_DISTANCE(kvRowEnd(row), kvRowValues(row)));
- memcpy(kvRowColVal(nrow, kvRowColIdxAt(nrow, colIdx)), value, diff);
- } else {
- int16_t tlen = (int16_t)(POINTER_DISTANCE(ptr, kvRowColIdx(row)));
- if (tlen > 0) {
- memcpy(kvRowColIdx(nrow), kvRowColIdx(row), tlen);
- memcpy(kvRowValues(nrow), kvRowValues(row), ((SColIdx *)ptr)->offset);
- }
+ memcpy(kvRowColIdx(nrow), kvRowColIdx(row), sizeof(SColIdx) * oRowCols);
+ memcpy(kvRowValues(nrow), kvRowValues(row), kvRowValLen(row));
- int colIdx = tlen / sizeof(SColIdx);
- kvRowColIdxAt(nrow, colIdx)->colId = colId;
- kvRowColIdxAt(nrow, colIdx)->offset = ((SColIdx *)ptr)->offset;
- memcpy(kvRowColVal(nrow, kvRowColIdxAt(nrow, colIdx)), value, diff);
+ pColIdx = kvRowColIdxAt(nrow, oRowCols);
+ pColIdx->colId = colId;
+ pColIdx->offset = kvRowValLen(row);
- for (int i = colIdx; i < kvRowNCols(row); i++) {
- kvRowColIdxAt(nrow, i + 1)->colId = kvRowColIdxAt(row, i)->colId;
- kvRowColIdxAt(nrow, i + 1)->offset = kvRowColIdxAt(row, i)->offset + diff;
- }
- memcpy(kvRowColVal(nrow, kvRowColIdxAt(nrow, colIdx + 1)), kvRowColVal(row, kvRowColIdxAt(row, colIdx)),
- POINTER_DISTANCE(kvRowEnd(row), kvRowColVal(row, kvRowColIdxAt(row, colIdx)))
+ memcpy(kvRowColVal(nrow, pColIdx), value, diff); // copy new value
- );
- }
+ tdSortKVRowByColIdx(nrow);
*orow = nrow;
free(row);
@@ -680,9 +672,8 @@ int tdSetKVRowDataOfCol(SKVRow *orow, int16_t colId, int8_t type, void *value) {
if (varDataTLen(value) == varDataTLen(pOldVal)) { // just update the column value in place
memcpy(pOldVal, value, varDataTLen(value));
- } else { // need to reallocate the memory
- uint16_t diff = varDataTLen(value) - varDataTLen(pOldVal);
- uint16_t nlen = kvRowLen(row) + diff;
+ } else { // need to reallocate the memory
+ int16_t nlen = kvRowLen(row) + (varDataTLen(value) - varDataTLen(pOldVal));
ASSERT(nlen > 0);
nrow = malloc(nlen);
if (nrow == NULL) return -1;
@@ -690,30 +681,22 @@ int tdSetKVRowDataOfCol(SKVRow *orow, int16_t colId, int8_t type, void *value) {
kvRowSetLen(nrow, nlen);
kvRowSetNCols(nrow, kvRowNCols(row));
- // Copy part ahead
- nlen = (int16_t)(POINTER_DISTANCE(ptr, kvRowColIdx(row)));
- ASSERT(nlen % sizeof(SColIdx) == 0);
- if (nlen > 0) {
- ASSERT(((SColIdx *)ptr)->offset > 0);
- memcpy(kvRowColIdx(nrow), kvRowColIdx(row), nlen);
- memcpy(kvRowValues(nrow), kvRowValues(row), ((SColIdx *)ptr)->offset);
+ int zsize = sizeof(SColIdx) * kvRowNCols(row) + ((SColIdx *)ptr)->offset;
+ memcpy(kvRowColIdx(nrow), kvRowColIdx(row), zsize);
+ memcpy(kvRowColVal(nrow, ((SColIdx *)ptr)), value, varDataTLen(value));
+ // Copy left value part
+ int lsize = kvRowLen(row) - TD_KV_ROW_HEAD_SIZE - zsize - varDataTLen(pOldVal);
+ if (lsize > 0) {
+ memcpy(POINTER_SHIFT(nrow, TD_KV_ROW_HEAD_SIZE + zsize + varDataTLen(value)),
+ POINTER_SHIFT(row, TD_KV_ROW_HEAD_SIZE + zsize + varDataTLen(pOldVal)), lsize);
}
- // Construct current column value
- int colIdx = nlen / sizeof(SColIdx);
- pColIdx = kvRowColIdxAt(nrow, colIdx);
- pColIdx->colId = ((SColIdx *)ptr)->colId;
- pColIdx->offset = ((SColIdx *)ptr)->offset;
- memcpy(kvRowColVal(nrow, pColIdx), value, varDataTLen(value));
-
- // Construct columns after
- if (kvRowNCols(nrow) - colIdx - 1 > 0) {
- for (int i = colIdx + 1; i < kvRowNCols(nrow); i++) {
- kvRowColIdxAt(nrow, i)->colId = kvRowColIdxAt(row, i)->colId;
- kvRowColIdxAt(nrow, i)->offset = kvRowColIdxAt(row, i)->offset + diff;
+ for (int i = 0; i < kvRowNCols(nrow); i++) {
+ pColIdx = kvRowColIdxAt(nrow, i);
+
+ if (pColIdx->offset > ((SColIdx *)ptr)->offset) {
+ pColIdx->offset = pColIdx->offset - varDataTLen(pOldVal) + varDataTLen(value);
}
- memcpy(kvRowColVal(nrow, kvRowColIdxAt(nrow, colIdx + 1)), kvRowColVal(row, kvRowColIdxAt(row, colIdx + 1)),
- POINTER_DISTANCE(kvRowEnd(row), kvRowColVal(row, kvRowColIdxAt(row, colIdx + 1))));
}
*orow = nrow;
@@ -784,4 +767,97 @@ SKVRow tdGetKVRowFromBuilder(SKVRowBuilder *pBuilder) {
memcpy(kvRowValues(row), pBuilder->buf, pBuilder->size);
return row;
-}
\ No newline at end of file
+}
+
+SMemRow mergeTwoMemRows(void *buffer, SMemRow row1, SMemRow row2, STSchema *pSchema1, STSchema *pSchema2) {
+#if 0
+ ASSERT(memRowKey(row1) == memRowKey(row2));
+ ASSERT(schemaVersion(pSchema1) == memRowVersion(row1));
+ ASSERT(schemaVersion(pSchema2) == memRowVersion(row2));
+ ASSERT(schemaVersion(pSchema1) >= schemaVersion(pSchema2));
+#endif
+
+ SArray *stashRow = taosArrayInit(pSchema1->numOfCols, sizeof(SColInfo));
+ if (stashRow == NULL) {
+ return NULL;
+ }
+
+ SMemRow pRow = buffer;
+ SDataRow dataRow = memRowDataBody(pRow);
+ memRowSetType(pRow, SMEM_ROW_DATA);
+ dataRowSetVersion(dataRow, schemaVersion(pSchema1)); // use latest schema version
+ dataRowSetLen(dataRow, (TDRowLenT)(TD_DATA_ROW_HEAD_SIZE + pSchema1->flen));
+
+ TDRowTLenT dataLen = 0, kvLen = TD_MEM_ROW_KV_HEAD_SIZE;
+
+ int32_t i = 0; // row1
+ int32_t j = 0; // row2
+ int32_t nCols1 = schemaNCols(pSchema1);
+ int32_t nCols2 = schemaNCols(pSchema2);
+ SColInfo colInfo = {0};
+ int32_t kvIdx1 = 0, kvIdx2 = 0;
+
+ while (i < nCols1) {
+ STColumn *pCol = schemaColAt(pSchema1, i);
+ void * val1 = tdGetMemRowDataOfColEx(row1, pCol->colId, pCol->type, TD_DATA_ROW_HEAD_SIZE + pCol->offset, &kvIdx1);
+ // if val1 != NULL, use val1;
+ if (val1 != NULL && !isNull(val1, pCol->type)) {
+ tdAppendColVal(dataRow, val1, pCol->type, pCol->offset);
+ kvLen += tdGetColAppendLen(SMEM_ROW_KV, val1, pCol->type);
+ setSColInfo(&colInfo, pCol->colId, pCol->type, val1);
+ taosArrayPush(stashRow, &colInfo);
+ ++i; // next col
+ continue;
+ }
+
+ void *val2 = NULL;
+ while (j < nCols2) {
+ STColumn *tCol = schemaColAt(pSchema2, j);
+ if (tCol->colId < pCol->colId) {
+ ++j;
+ continue;
+ }
+ if (tCol->colId == pCol->colId) {
+ val2 = tdGetMemRowDataOfColEx(row2, tCol->colId, tCol->type, TD_DATA_ROW_HEAD_SIZE + tCol->offset, &kvIdx2);
+ } else if (tCol->colId > pCol->colId) {
+ // set NULL
+ }
+ break;
+    }  // end of while(j<nCols2)
+    if (val2 == NULL) {
+      val2 = (void *)getNullValue(pCol->type);
+    }
+ tdAppendColVal(dataRow, val2, pCol->type, pCol->offset);
+ if (!isNull(val2, pCol->type)) {
+ kvLen += tdGetColAppendLen(SMEM_ROW_KV, val2, pCol->type);
+ setSColInfo(&colInfo, pCol->colId, pCol->type, val2);
+ taosArrayPush(stashRow, &colInfo);
+ }
+
+ ++i; // next col
+ }
+
+ dataLen = memRowTLen(pRow);
+
+ if (kvLen < dataLen) {
+ // scan stashRow and generate SKVRow
+ memset(buffer, 0, sizeof(dataLen));
+ SMemRow tRow = buffer;
+ memRowSetType(tRow, SMEM_ROW_KV);
+ SKVRow kvRow = (SKVRow)memRowKvBody(tRow);
+ int16_t nKvNCols = (int16_t) taosArrayGetSize(stashRow);
+ kvRowSetLen(kvRow, (TDRowLenT)(TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * nKvNCols));
+ kvRowSetNCols(kvRow, nKvNCols);
+ memRowSetKvVersion(tRow, pSchema1->version);
+
+ int32_t toffset = 0;
+ int16_t k;
+ for (k = 0; k < nKvNCols; ++k) {
+ SColInfo *pColInfo = taosArrayGet(stashRow, k);
+ tdAppendKvColVal(kvRow, pColInfo->colVal, pColInfo->colId, pColInfo->colType, &toffset);
+ }
+ ASSERT(kvLen == memRowTLen(tRow));
+ }
+ taosArrayDestroy(stashRow);
+ return buffer;
+}
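
A hedged sketch of how a caller is expected to drive the new mergeTwoMemRows(): row1 and row2 share the same timestamp key, pSchema1/pSchema2 are their schemas with pSchema1 the newer one, and the scratch buffer must hold the merged row; memRowMaxBytesFromSchema() of the newer schema is assumed to be a sufficient bound, since the KV encoding is only kept when it comes out smaller than the data-row encoding.

    #include <stdlib.h>
    #include "tdataformat.h"

    static SMemRow mergeRows(SMemRow row1, SMemRow row2, STSchema *pSchema1, STSchema *pSchema2) {
      void *buffer = malloc(memRowMaxBytesFromSchema(pSchema1));   /* assumed upper bound */
      if (buffer == NULL) return NULL;

      SMemRow merged = mergeTwoMemRows(buffer, row1, row2, pSchema1, pSchema2);
      if (merged == NULL) {
        free(buffer);
        return NULL;
      }
      return merged;   /* aliases buffer; encoded as SDataRow or SKVRow, whichever is smaller */
    }
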
diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c
index d4c52c9d1fb5702c0571306f18458a935d902de2..a58303e9fc98a0dab30da1440c32cf03c5b1fa1e 100644
--- a/src/common/src/tglobal.c
+++ b/src/common/src/tglobal.c
@@ -38,7 +38,7 @@ uint16_t tsDnodeDnodePort = 6035; // udp/tcp
uint16_t tsSyncPort = 6040;
uint16_t tsArbitratorPort = 6042;
int32_t tsStatusInterval = 1; // second
-int32_t tsNumOfMnodes = 3;
+int32_t tsNumOfMnodes = 1;
int8_t tsEnableVnodeBak = 1;
int8_t tsEnableTelemetryReporting = 1;
int8_t tsArbOnline = 0;
@@ -252,8 +252,8 @@ char lossyColumns[32] = ""; // "float|double" means all float and double column
// below option can take effect when tsLossyColumns not empty
double fPrecision = 1E-8; // float column precision
double dPrecision = 1E-16; // double column precision
-uint32_t maxIntervals = 500; // max intervals
-uint32_t intervals = 100; // intervals
+uint32_t maxRange = 500; // max range
+uint32_t curRange = 100; // range
char Compressor[32] = "ZSTD_COMPRESSOR"; // ZSTD_COMPRESSOR or GZIP_COMPRESSOR
#endif
@@ -1565,8 +1565,8 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
- cfg.option = "maxIntervals";
- cfg.ptr = &maxIntervals;
+ cfg.option = "maxRange";
+ cfg.ptr = &maxRange;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
cfg.minValue = 0;
@@ -1575,8 +1575,8 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
- cfg.option = "intervals";
- cfg.ptr = &intervals;
+ cfg.option = "range";
+ cfg.ptr = &curRange;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
cfg.minValue = 0;
diff --git a/src/common/src/tname.c b/src/common/src/tname.c
index 26502c5d9cd032afd20d89ba8ea2da72b82a62c1..5da48b2e9ac9e8bdaf5158ae780379c913275780 100644
--- a/src/common/src/tname.c
+++ b/src/common/src/tname.c
@@ -319,7 +319,7 @@ int32_t tNameGetDbName(const SName* name, char* dst) {
int32_t tNameGetFullDbName(const SName* name, char* dst) {
assert(name != NULL && dst != NULL);
- snprintf(dst, TSDB_ACCT_ID_LEN + TS_PATH_DELIMITER_LEN + TSDB_DB_NAME_LEN,
+  snprintf(dst, TSDB_ACCT_ID_LEN + TS_PATH_DELIMITER_LEN + TSDB_DB_NAME_LEN,  // there is an overwrite risk
"%s.%s", name->acctId, name->dbname);
return 0;
}
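
The comment added above notes that tNameGetFullDbName() cannot know the real capacity of dst and simply assumes TSDB_ACCT_ID_LEN + TS_PATH_DELIMITER_LEN + TSDB_DB_NAME_LEN bytes are available. A defensive variant that passes the capacity explicitly is sketched below purely as an illustration; it is not part of this patch.

    #include <stdio.h>
    #include <stdint.h>
    #include "tname.h"

    static int32_t tNameGetFullDbNameEx(const SName *name, char *dst, size_t dstSize) {
      int32_t n = snprintf(dst, dstSize, "%s.%s", name->acctId, name->dbname);
      /* snprintf truncates instead of overflowing; report truncation to the caller */
      return (n < 0 || (size_t)n >= dstSize) ? -1 : 0;
    }
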
diff --git a/src/common/src/ttypes.c b/src/common/src/ttypes.c
index 6021a8f579a0dd9cf16f0505175c1080da570424..eeffe49adc9e5efe97dde580584afad280ee2993 100644
--- a/src/common/src/ttypes.c
+++ b/src/common/src/ttypes.c
@@ -38,7 +38,11 @@ const int32_t TYPE_BYTES[15] = {
#define DO_STATICS(__sum, __min, __max, __minIndex, __maxIndex, _list, _index) \
do { \
- (__sum) += (_list)[(_index)]; \
+ if (_list[(_index)] >= (INT64_MAX - (__sum))) { \
+ __sum = INT64_MAX; \
+ } else { \
+ (__sum) += (_list)[(_index)]; \
+ } \
if ((__min) > (_list)[(_index)]) { \
(__min) = (_list)[(_index)]; \
(__minIndex) = (_index); \
@@ -405,7 +409,7 @@ bool isValidDataType(int32_t type) {
return type >= TSDB_DATA_TYPE_NULL && type <= TSDB_DATA_TYPE_UBIGINT;
}
-void setVardataNull(char* val, int32_t type) {
+void setVardataNull(void* val, int32_t type) {
if (type == TSDB_DATA_TYPE_BINARY) {
varDataSetLen(val, sizeof(int8_t));
*(uint8_t*) varDataVal(val) = TSDB_DATA_BINARY_NULL;
@@ -417,75 +421,75 @@ void setVardataNull(char* val, int32_t type) {
}
}
-void setNull(char *val, int32_t type, int32_t bytes) { setNullN(val, type, bytes, 1); }
+void setNull(void *val, int32_t type, int32_t bytes) { setNullN(val, type, bytes, 1); }
-void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems) {
+void setNullN(void *val, int32_t type, int32_t bytes, int32_t numOfElems) {
switch (type) {
case TSDB_DATA_TYPE_BOOL:
for (int32_t i = 0; i < numOfElems; ++i) {
- *(uint8_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_BOOL_NULL;
+ *(uint8_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_BOOL_NULL;
}
break;
case TSDB_DATA_TYPE_TINYINT:
for (int32_t i = 0; i < numOfElems; ++i) {
- *(uint8_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_TINYINT_NULL;
+ *(uint8_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_TINYINT_NULL;
}
break;
case TSDB_DATA_TYPE_SMALLINT:
for (int32_t i = 0; i < numOfElems; ++i) {
- *(uint16_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_SMALLINT_NULL;
+ *(uint16_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_SMALLINT_NULL;
}
break;
case TSDB_DATA_TYPE_INT:
for (int32_t i = 0; i < numOfElems; ++i) {
- *(uint32_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_INT_NULL;
+ *(uint32_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_INT_NULL;
}
break;
case TSDB_DATA_TYPE_BIGINT:
case TSDB_DATA_TYPE_TIMESTAMP:
for (int32_t i = 0; i < numOfElems; ++i) {
- *(uint64_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_BIGINT_NULL;
+ *(uint64_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_BIGINT_NULL;
}
break;
case TSDB_DATA_TYPE_UTINYINT:
for (int32_t i = 0; i < numOfElems; ++i) {
- *(uint8_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_UTINYINT_NULL;
+ *(uint8_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_UTINYINT_NULL;
}
break;
case TSDB_DATA_TYPE_USMALLINT:
for (int32_t i = 0; i < numOfElems; ++i) {
- *(uint16_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_USMALLINT_NULL;
+ *(uint16_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_USMALLINT_NULL;
}
break;
case TSDB_DATA_TYPE_UINT:
for (int32_t i = 0; i < numOfElems; ++i) {
- *(uint32_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_UINT_NULL;
+ *(uint32_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_UINT_NULL;
}
break;
case TSDB_DATA_TYPE_UBIGINT:
for (int32_t i = 0; i < numOfElems; ++i) {
- *(uint64_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_UBIGINT_NULL;
+ *(uint64_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_UBIGINT_NULL;
}
break;
case TSDB_DATA_TYPE_FLOAT:
for (int32_t i = 0; i < numOfElems; ++i) {
- *(uint32_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_FLOAT_NULL;
+ *(uint32_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_FLOAT_NULL;
}
break;
case TSDB_DATA_TYPE_DOUBLE:
for (int32_t i = 0; i < numOfElems; ++i) {
- *(uint64_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_DOUBLE_NULL;
+ *(uint64_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_DOUBLE_NULL;
}
break;
case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_BINARY:
for (int32_t i = 0; i < numOfElems; ++i) {
- setVardataNull(val + i * bytes, type);
+ setVardataNull(POINTER_SHIFT(val, i * bytes), type);
}
break;
default: {
for (int32_t i = 0; i < numOfElems; ++i) {
- *(uint32_t *)(val + i * tDataTypes[TSDB_DATA_TYPE_INT].bytes) = TSDB_DATA_INT_NULL;
+ *(uint32_t *)(POINTER_SHIFT(val, i * tDataTypes[TSDB_DATA_TYPE_INT].bytes)) = TSDB_DATA_INT_NULL;
}
break;
}
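
With setNull()/setNullN() now taking void * and stepping through the buffer via POINTER_SHIFT, callers no longer need char * casts. A minimal usage sketch; the declaring headers and the choice of column type are assumptions for illustration:

    #include <stdint.h>
    /* declaring headers (taosdef.h / ttype.h) are assumed here */

    static void markAllNull(void *colData, int32_t numOfRows) {
      /* write the INT null sentinel into every slot of an int32 column buffer */
      setNullN(colData, TSDB_DATA_TYPE_INT, sizeof(int32_t), numOfRows);
    }
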
diff --git a/src/connector/C#/TDengineDriver.cs b/src/connector/C#/TDengineDriver.cs
index 2c150341f62d16372a99d341a495771e4c2a3dbc..e6c3a598adc0bc4bcf5ea84953f649b418199555 100644
--- a/src/connector/C#/TDengineDriver.cs
+++ b/src/connector/C#/TDengineDriver.cs
@@ -163,5 +163,8 @@ namespace TDengineDriver
[DllImport("taos", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)]
static extern public int Close(IntPtr taos);
+    // get the precision of the result set
+ [DllImport("taos", EntryPoint = "taos_result_precision", CallingConvention = CallingConvention.Cdecl)]
+ static extern public int ResultPrecision(IntPtr taos);
}
}
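
The new C# binding above maps onto the C API taos_result_precision(), which reports the timestamp precision of a result set. A small C-side sketch; the 0/1/2 = milli/micro/nano mapping is the conventional one and should be checked against taosdef.h:

    #include <taos.h>

    static int queryPrecision(TAOS *conn) {
      TAOS_RES *res = taos_query(conn, "select ts, current from test.d10 limit 1");
      int precision = taos_result_precision(res);   /* 0: ms, 1: us, 2: ns */
      taos_free_result(res);
      return precision;
    }
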
diff --git a/src/connector/grafanaplugin b/src/connector/grafanaplugin
index 3530c6df097134a410bacec6b3cd013ef38a61aa..4a4d79099b076b8ff12d5b4fdbcba54049a6866d 160000
--- a/src/connector/grafanaplugin
+++ b/src/connector/grafanaplugin
@@ -1 +1 @@
-Subproject commit 3530c6df097134a410bacec6b3cd013ef38a61aa
+Subproject commit 4a4d79099b076b8ff12d5b4fdbcba54049a6866d
diff --git a/src/connector/jdbc/CMakeLists.txt b/src/connector/jdbc/CMakeLists.txt
index 7791317969154572ad8962b281015e4217077470..e432dac1cea593b371a173f334e5313236091ab3 100644
--- a/src/connector/jdbc/CMakeLists.txt
+++ b/src/connector/jdbc/CMakeLists.txt
@@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED)
ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME}
POST_BUILD
COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
- COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-*-dist.jar ${LIBRARY_OUTPUT_PATH}
+ COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.34-dist.jar ${LIBRARY_OUTPUT_PATH}
COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
COMMENT "build jdbc driver")
ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME})
diff --git a/src/connector/jdbc/deploy-pom.xml b/src/connector/jdbc/deploy-pom.xml
index 06122372f027c003c1991729efc56dc8cf5929ae..ef57198e78d2268faba526d5506b0dc384f5766f 100755
--- a/src/connector/jdbc/deploy-pom.xml
+++ b/src/connector/jdbc/deploy-pom.xml
@@ -5,7 +5,7 @@
com.taosdata.jdbc
taos-jdbcdriver
- 2.0.32
+ 2.0.34
jar
JDBCDriver
diff --git a/src/connector/jdbc/pom.xml b/src/connector/jdbc/pom.xml
index 0043536daf3b070e48ac5e0310f6d34e08ed600c..907562fe26ab831876a4b0a7edf9860cb8f297a2 100644
--- a/src/connector/jdbc/pom.xml
+++ b/src/connector/jdbc/pom.xml
@@ -3,7 +3,7 @@
4.0.0
com.taosdata.jdbc
taos-jdbcdriver
- 2.0.33
+ 2.0.34
jar
JDBCDriver
https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc
@@ -122,6 +122,7 @@
**/TSDBJNIConnectorTest.java
**/TaosInfoMonitorTest.java
**/UnsignedNumberJniTest.java
+ **/TimeZoneTest.java
true
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java
index f3f04eff126a1b4b124cec736790ff0574ddb480..740e3c6c21be568bf71e4d68a3129c527da441a6 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java
@@ -130,7 +130,7 @@ public abstract class TSDBConstants {
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
return Types.NCHAR;
}
- throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE_IN_TDENGINE);
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE);
}
public static String taosType2JdbcTypeName(int taosType) throws SQLException {
@@ -160,7 +160,7 @@ public abstract class TSDBConstants {
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
return "NCHAR";
default:
- throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE_IN_TDENGINE);
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE);
}
}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java
index da89081428bb076c69be5e5aac189aa467d09307..d626698663c648ee8c39bab4d5f7831099ba8c81 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java
@@ -31,8 +31,8 @@ public class TSDBError {
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_URL_NOT_SET, "url is not set");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_INVALID_SQL, "invalid sql");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_NUMERIC_VALUE_OUT_OF_RANGE, "numeric value out of range");
- TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE_IN_TDENGINE, "unknown taos type in tdengine");
- TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN_TIMESTAMP_PERCISION, "unknown timestamp precision");
+ TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE, "unknown taos type in tdengine");
+ TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN_TIMESTAMP_PRECISION, "unknown timestamp precision");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN, "unknown error");
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java
index a796e6d86f69d9c41317094f05e941a21b2ff23c..3c44d69be58c5b124493367e3d2efb8c7d835e53 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java
@@ -25,8 +25,10 @@ public class TSDBErrorNumbers {
public static final int ERROR_URL_NOT_SET = 0x2312; // url is not set
public static final int ERROR_INVALID_SQL = 0x2313; // invalid sql
public static final int ERROR_NUMERIC_VALUE_OUT_OF_RANGE = 0x2314; // numeric value out of range
- public static final int ERROR_UNKNOWN_TAOS_TYPE_IN_TDENGINE = 0x2315; //unknown taos type in tdengine
- public static final int ERROR_UNKNOWN_TIMESTAMP_PERCISION = 0x2316; // unknown timestamp precision
+ public static final int ERROR_UNKNOWN_TAOS_TYPE = 0x2315; //unknown taos type in tdengine
+ public static final int ERROR_UNKNOWN_TIMESTAMP_PRECISION = 0x2316; // unknown timestamp precision
+ public static final int ERROR_RESTFul_Client_Protocol_Exception = 0x2317;
+ public static final int ERROR_RESTFul_Client_IOException = 0x2318;
public static final int ERROR_UNKNOWN = 0x2350; //unknown error
@@ -62,8 +64,11 @@ public class TSDBErrorNumbers {
errorNumbers.add(ERROR_URL_NOT_SET);
errorNumbers.add(ERROR_INVALID_SQL);
errorNumbers.add(ERROR_NUMERIC_VALUE_OUT_OF_RANGE);
- errorNumbers.add(ERROR_UNKNOWN_TAOS_TYPE_IN_TDENGINE);
- errorNumbers.add(ERROR_UNKNOWN_TIMESTAMP_PERCISION);
+ errorNumbers.add(ERROR_UNKNOWN_TAOS_TYPE);
+ errorNumbers.add(ERROR_UNKNOWN_TIMESTAMP_PRECISION);
+ errorNumbers.add(ERROR_RESTFul_Client_IOException);
+
+ errorNumbers.add(ERROR_RESTFul_Client_Protocol_Exception);
errorNumbers.add(ERROR_SUBSCRIBE_FAILED);
errorNumbers.add(ERROR_UNSUPPORTED_ENCODING);
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java
index 051eca7e10ad18daea6a7b1ad55f148b786e0798..4fdbb308c54c23a1fb427f1e9f1530894b0daae1 100755
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java
@@ -80,7 +80,8 @@ public class TSDBJNIConnector {
this.taos = this.connectImp(host, port, dbName, user, password);
if (this.taos == TSDBConstants.JNI_NULL_POINTER) {
- throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL);
+ String errMsg = this.getErrMsg(0);
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL, errMsg);
}
// invoke connectImp only here
taosInfo.conn_open_increment();
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java
index f0ea03638f620dacda03a6045cc0979975cea698..1ea39236b666fda106c3ee3534560b6380d7bec9 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java
@@ -213,7 +213,7 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
long nanoAdjustment = Integer.parseInt(value.substring(20));
return Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment));
}
- throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TIMESTAMP_PERCISION);
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TIMESTAMP_PRECISION);
}
}
}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/HttpClientPoolUtil.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/HttpClientPoolUtil.java
index e10bdb5aa93ddeae29e22018fb9fe6bd08a6d44e..de26ab7f1f458a4587ce15bebab3c2c1b0dbc070 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/HttpClientPoolUtil.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/HttpClientPoolUtil.java
@@ -1,15 +1,18 @@
package com.taosdata.jdbc.utils;
+import com.taosdata.jdbc.TSDBError;
+import com.taosdata.jdbc.TSDBErrorNumbers;
import org.apache.http.HeaderElement;
import org.apache.http.HeaderElementIterator;
import org.apache.http.HttpEntity;
+import org.apache.http.client.ClientProtocolException;
+import org.apache.http.client.HttpRequestRetryHandler;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.*;
import org.apache.http.client.protocol.HttpClientContext;
import org.apache.http.conn.ConnectionKeepAliveStrategy;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.CloseableHttpClient;
-import org.apache.http.impl.client.DefaultHttpRequestRetryHandler;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
import org.apache.http.message.BasicHeaderElementIterator;
@@ -17,35 +20,24 @@ import org.apache.http.protocol.HTTP;
import org.apache.http.protocol.HttpContext;
import org.apache.http.util.EntityUtils;
+import javax.net.ssl.SSLException;
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.net.UnknownHostException;
import java.nio.charset.StandardCharsets;
-
+import java.sql.SQLException;
public class HttpClientPoolUtil {
private static final String DEFAULT_CONTENT_TYPE = "application/json";
+ private static final int DEFAULT_MAX_TOTAL = 200;
+ private static final int DEFAULT_MAX_PER_ROUTE = 20;
private static final int DEFAULT_TIME_OUT = 15000;
- private static final int DEFAULT_MAX_PER_ROUTE = 32;
- private static final int DEFAULT_MAX_TOTAL = 1000;
private static final int DEFAULT_HTTP_KEEP_TIME = 15000;
-
- private static CloseableHttpClient httpClient;
-
- private static synchronized void initPools() {
- if (httpClient == null) {
- PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager();
- connectionManager.setDefaultMaxPerRoute(DEFAULT_MAX_PER_ROUTE);
- connectionManager.setMaxTotal(DEFAULT_MAX_TOTAL);
- httpClient = HttpClients.custom()
- .setKeepAliveStrategy(DEFAULT_KEEP_ALIVE_STRATEGY)
- .setConnectionManager(connectionManager)
- .setRetryHandler(new DefaultHttpRequestRetryHandler(3, true))
- .build();
- }
- }
+ private static final int DEFAULT_MAX_RETRY_COUNT = 5;
private static final ConnectionKeepAliveStrategy DEFAULT_KEEP_ALIVE_STRATEGY = (response, context) -> {
HeaderElementIterator it = new BasicHeaderElementIterator(response.headerIterator(HTTP.CONN_KEEP_ALIVE));
- int keepTime = DEFAULT_HTTP_KEEP_TIME * 1000;
while (it.hasNext()) {
HeaderElement headerElement = it.nextElement();
String param = headerElement.getName();
@@ -53,34 +45,73 @@ public class HttpClientPoolUtil {
if (value != null && param.equalsIgnoreCase("timeout")) {
try {
return Long.parseLong(value) * 1000;
- } catch (Exception e) {
- new Exception("format KeepAlive timeout exception, exception:" + e.toString()).printStackTrace();
+ } catch (NumberFormatException ignore) {
}
}
}
- return keepTime;
+ return DEFAULT_HTTP_KEEP_TIME * 1000;
+ };
+
+ private static final HttpRequestRetryHandler retryHandler = (exception, executionCount, httpContext) -> {
+ if (executionCount >= DEFAULT_MAX_RETRY_COUNT)
+ // do not retry if over max retry count
+ return false;
+ if (exception instanceof InterruptedIOException)
+ // timeout
+ return false;
+ if (exception instanceof UnknownHostException)
+ // unknown host
+ return false;
+ if (exception instanceof SSLException)
+ // SSL handshake exception
+ return false;
+ return true;
};
- /**
- * 执行http post请求
- * 默认采用Content-Type:application/json,Accept:application/json
- *
- * @param uri 请求地址
- * @param data 请求数据
- * @return responseBody
- */
- public static String execute(String uri, String data, String token) {
- long startTime = System.currentTimeMillis();
+ private static CloseableHttpClient httpClient;
+
+ static {
+ PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager();
+ connectionManager.setMaxTotal(DEFAULT_MAX_TOTAL);
+ connectionManager.setDefaultMaxPerRoute(DEFAULT_MAX_PER_ROUTE);
+ httpClient = HttpClients.custom().setKeepAliveStrategy(DEFAULT_KEEP_ALIVE_STRATEGY).setConnectionManager(connectionManager).setRetryHandler(retryHandler).build();
+ }
+
+ /*** execute GET request ***/
+ public static String execute(String uri) throws SQLException {
HttpEntity httpEntity = null;
- HttpEntityEnclosingRequestBase method = null;
String responseBody = "";
try {
- if (httpClient == null) {
- initPools();
+ HttpRequestBase method = getRequest(uri, HttpGet.METHOD_NAME);
+ HttpContext context = HttpClientContext.create();
+ CloseableHttpResponse httpResponse = httpClient.execute(method, context);
+ httpEntity = httpResponse.getEntity();
+ if (httpEntity != null) {
+ responseBody = EntityUtils.toString(httpEntity, StandardCharsets.UTF_8);
}
- method = (HttpEntityEnclosingRequestBase) getRequest(uri, HttpPost.METHOD_NAME, DEFAULT_CONTENT_TYPE, 0);
- method.setHeader("Content-Type", "text/plain");
- method.setHeader("Connection", "keep-alive");
+ } catch (ClientProtocolException e) {
+ e.printStackTrace();
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_Protocol_Exception, e.getMessage());
+ } catch (IOException exception) {
+ exception.printStackTrace();
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_IOException, exception.getMessage());
+ } finally {
+ if (httpEntity != null) {
+ EntityUtils.consumeQuietly(httpEntity);
+ }
+ }
+ return responseBody;
+ }
+
+
+ /*** execute POST request ***/
+ public static String execute(String uri, String data, String token) throws SQLException {
+ HttpEntity httpEntity = null;
+ String responseBody = "";
+ try {
+ HttpEntityEnclosingRequestBase method = (HttpEntityEnclosingRequestBase) getRequest(uri, HttpPost.METHOD_NAME);
+ method.setHeader(HTTP.CONTENT_TYPE, "text/plain");
+ method.setHeader(HTTP.CONN_DIRECTIVE, HTTP.CONN_KEEP_ALIVE);
method.setHeader("Authorization", "Taosd " + token);
method.setEntity(new StringEntity(data, StandardCharsets.UTF_8));
@@ -88,46 +119,31 @@ public class HttpClientPoolUtil {
CloseableHttpResponse httpResponse = httpClient.execute(method, context);
httpEntity = httpResponse.getEntity();
if (httpEntity != null) {
- responseBody = EntityUtils.toString(httpEntity, "UTF-8");
+ responseBody = EntityUtils.toString(httpEntity, StandardCharsets.UTF_8);
}
- } catch (Exception e) {
- if (method != null) {
- method.abort();
- }
- new Exception("execute post request exception, url:" + uri + ", exception:" + e.toString() + ", cost time(ms):" + (System.currentTimeMillis() - startTime)).printStackTrace();
+ } catch (ClientProtocolException e) {
+ e.printStackTrace();
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_Protocol_Exception, e.getMessage());
+ } catch (IOException exception) {
+ exception.printStackTrace();
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_IOException, exception.getMessage());
} finally {
if (httpEntity != null) {
- try {
- EntityUtils.consumeQuietly(httpEntity);
- } catch (Exception e) {
- new Exception("close response exception, url:" + uri + ", exception:" + e.toString() + ", cost time(ms):" + (System.currentTimeMillis() - startTime)).printStackTrace();
- }
+ EntityUtils.consumeQuietly(httpEntity);
}
}
return responseBody;
}
- /**
- * * 创建请求
- *
- * @param uri 请求url
- * @param methodName 请求的方法类型
- * @param contentType contentType类型
- * @param timeout 超时时间
- * @return HttpRequestBase 返回类型
- * @author lisc
- */
- private static HttpRequestBase getRequest(String uri, String methodName, String contentType, int timeout) {
- if (httpClient == null) {
- initPools();
- }
+ /*** create http request ***/
+ private static HttpRequestBase getRequest(String uri, String methodName) {
HttpRequestBase method;
- if (timeout <= 0) {
- timeout = DEFAULT_TIME_OUT;
- }
- RequestConfig requestConfig = RequestConfig.custom().setSocketTimeout(timeout * 1000)
- .setConnectTimeout(timeout * 1000).setConnectionRequestTimeout(timeout * 1000)
- .setExpectContinueEnabled(false).build();
+ RequestConfig requestConfig = RequestConfig.custom()
+ .setSocketTimeout(DEFAULT_TIME_OUT * 1000)
+ .setConnectTimeout(DEFAULT_TIME_OUT * 1000)
+ .setConnectionRequestTimeout(DEFAULT_TIME_OUT * 1000)
+ .setExpectContinueEnabled(false)
+ .build();
if (HttpPut.METHOD_NAME.equalsIgnoreCase(methodName)) {
method = new HttpPut(uri);
} else if (HttpPost.METHOD_NAME.equalsIgnoreCase(methodName)) {
@@ -137,52 +153,10 @@ public class HttpClientPoolUtil {
} else {
method = new HttpPost(uri);
}
-
- if (contentType == null || contentType.isEmpty() || contentType.replaceAll("\\s", "").isEmpty()) {
- contentType = DEFAULT_CONTENT_TYPE;
- }
- method.addHeader("Content-Type", contentType);
- method.addHeader("Accept", contentType);
+ method.addHeader(HTTP.CONTENT_TYPE, DEFAULT_CONTENT_TYPE);
+ method.addHeader("Accept", DEFAULT_CONTENT_TYPE);
method.setConfig(requestConfig);
return method;
}
- /**
- * 执行GET 请求
- *
- * @param uri 网址
- * @return responseBody
- */
- public static String execute(String uri) {
- long startTime = System.currentTimeMillis();
- HttpEntity httpEntity = null;
- HttpRequestBase method = null;
- String responseBody = "";
- try {
- if (httpClient == null) {
- initPools();
- }
- method = getRequest(uri, HttpGet.METHOD_NAME, DEFAULT_CONTENT_TYPE, 0);
- HttpContext context = HttpClientContext.create();
- CloseableHttpResponse httpResponse = httpClient.execute(method, context);
- httpEntity = httpResponse.getEntity();
- if (httpEntity != null) {
- responseBody = EntityUtils.toString(httpEntity, "UTF-8");
- }
- } catch (Exception e) {
- if (method != null) {
- method.abort();
- }
- e.printStackTrace();
- } finally {
- if (httpEntity != null) {
- try {
- EntityUtils.consumeQuietly(httpEntity);
- } catch (Exception e) {
- new Exception("close response exception, url:" + uri + ", exception:" + e.toString() + ",cost time(ms):" + (System.currentTimeMillis() - startTime)).printStackTrace();
- }
- }
- }
- return responseBody;
- }
}
\ No newline at end of file
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/QueryDataTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/QueryDataTest.java
index 535e56f7d7735a7cbd209fbb2a2fddd492021e15..3fea221446775a779593f8c74c77474bc55fb071 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/QueryDataTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/QueryDataTest.java
@@ -34,9 +34,8 @@ public class QueryDataTest {
String createTableSql = "create table " + stbName + "(ts timestamp, name binary(64))";
statement.executeUpdate(createTableSql);
-
} catch (SQLException e) {
- return;
+ e.printStackTrace();
}
}
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimeZoneTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimeZoneTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..94a175ad5c7fd50fa35d6b45ea59ab26ffc02ce1
--- /dev/null
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimeZoneTest.java
@@ -0,0 +1,71 @@
+package com.taosdata.jdbc.cases;
+
+import com.taosdata.jdbc.TSDBDriver;
+import org.junit.Test;
+
+import java.sql.*;
+import java.time.Instant;
+import java.time.LocalDateTime;
+import java.time.ZoneId;
+import java.util.Properties;
+
+public class TimeZoneTest {
+
+ private String url = "jdbc:TAOS://127.0.0.1:6030/?user=root&password=taosdata";
+
+ @Test
+ public void javaTimeZone() {
+ LocalDateTime localDateTime = LocalDateTime.of(1970, 1, 1, 0, 0, 0);
+
+ Instant instant = localDateTime.atZone(ZoneId.of("UTC-8")).toInstant();
+ System.out.println("UTC-8: " + instant.getEpochSecond() + "," + instant);
+
+ instant = localDateTime.atZone(ZoneId.of("UT")).toInstant();
+ System.out.println("UTC: " + instant.getEpochSecond() + "," + instant);
+
+
+ instant = localDateTime.atZone(ZoneId.of("UTC+8")).toInstant();
+ System.out.println("UTC+8: " + instant.getEpochSecond() + "," + instant);
+ }
+
+ @Test
+ public void taosTimeZone() {
+ // given
+ Properties props = new Properties();
+ props.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
+
+ // when and then
+ try (Connection connection = DriverManager.getConnection(url, props)) {
+ Statement stmt = connection.createStatement();
+
+ stmt.execute("drop database if exists timezone_test");
+ stmt.execute("create database if not exists timezone_test keep 365000");
+ stmt.execute("use timezone_test");
+ stmt.execute("create table weather(ts timestamp, temperature float)");
+
+ stmt.execute("insert into timezone_test.weather(ts, temperature) values('1970-01-01 00:00:00', 1.0)");
+
+ ResultSet rs = stmt.executeQuery("select * from timezone_test.weather");
+ while (rs.next()) {
+ Timestamp ts = rs.getTimestamp("ts");
+ System.out.println("ts: " + ts.getTime() + "," + ts);
+ }
+
+ stmt.execute("insert into timezone_test.weather(ts, temperature, humidity) values('1970-01-02 00:00:00', 1.0, 2.0)");
+
+ rs = stmt.executeQuery("select * from timezone_test.weather");
+ while (rs.next()) {
+ Timestamp ts = rs.getTimestamp("ts");
+ System.out.println("ts: " + ts.getTime() + "," + ts);
+ }
+
+
+ stmt.execute("drop database if exists timezone_test");
+
+ stmt.close();
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+}
\ No newline at end of file
diff --git a/src/connector/python/taos/cinterface.py b/src/connector/python/taos/cinterface.py
index 6d8ceb7a293ef71c7e8944772e6b8a6a0ed8e7a9..660707bfcd04edb9d815b38d8ae806f35d2bfe2b 100644
--- a/src/connector/python/taos/cinterface.py
+++ b/src/connector/python/taos/cinterface.py
@@ -7,11 +7,11 @@ import platform
def _convert_millisecond_to_datetime(milli):
- return datetime.datetime.fromtimestamp(milli / 1000.0)
+ return datetime.datetime.fromtimestamp(0) + datetime.timedelta(seconds=milli/1000.0)
def _convert_microsecond_to_datetime(micro):
- return datetime.datetime.fromtimestamp(micro / 1000000.0)
+ return datetime.datetime.fromtimestamp(0) + datetime.timedelta(seconds=micro / 1000000.0)
def _convert_nanosecond_to_datetime(nanosec):
diff --git a/src/dnode/src/dnodeMain.c b/src/dnode/src/dnodeMain.c
index 61244ac0fbe2cfd412df173d22b0feecd7327916..eac04fe7bb2c53bd55dcbf8b6f5044d99007dc6c 100644
--- a/src/dnode/src/dnodeMain.c
+++ b/src/dnode/src/dnodeMain.c
@@ -40,8 +40,9 @@
#include "dnodeShell.h"
#include "dnodeTelemetry.h"
#include "module.h"
-#include "qScript.h"
#include "mnode.h"
+#include "qScript.h"
+#include "tcache.h"
#include "tscompression.h"
#if !defined(_MODULE) || !defined(_TD_LINUX)
@@ -208,6 +209,7 @@ void dnodeCleanUpSystem() {
dnodeCleanupComponents();
taos_cleanup();
taosCloseLog();
+ taosStopCacheRefreshWorker();
}
}
@@ -320,12 +322,12 @@ static int32_t dnodeInitStorage() {
static void dnodeCleanupStorage() {
// storage destroy
- tfsDestroy();
+ tfsDestroy();
- #ifdef TD_TSZ
+ #ifdef TD_TSZ
// compress destroy
tsCompressExit();
- #endif
+ #endif
}
bool dnodeIsFirstDeploy() {
diff --git a/src/dnode/src/dnodeTelemetry.c b/src/dnode/src/dnodeTelemetry.c
index 59b66879d42e3133ec7143c95f31c9b5df8ddb60..22a6dc5b1993b6d15510b078ac4245909221ae78 100644
--- a/src/dnode/src/dnodeTelemetry.c
+++ b/src/dnode/src/dnodeTelemetry.c
@@ -245,7 +245,7 @@ static void* telemetryThread(void* param) {
clock_gettime(CLOCK_REALTIME, &end);
end.tv_sec += 300; // wait 5 minutes before send first report
- setThreadName("telemetryThrd");
+ setThreadName("telemetry");
while (!tsExit) {
int r = 0;
diff --git a/src/dnode/src/dnodeVRead.c b/src/dnode/src/dnodeVRead.c
index e8003a8fe7996316d0b91689ea9738cbd24184b9..c404ab1a55c3788f5756c99f7914764e6e9af295 100644
--- a/src/dnode/src/dnodeVRead.c
+++ b/src/dnode/src/dnodeVRead.c
@@ -118,10 +118,11 @@ static void *dnodeProcessReadQueue(void *wparam) {
SVReadMsg * pRead;
int32_t qtype;
void * pVnode;
- char name[16];
- memset(name, 0, 16);
- snprintf(name, 16, "%s-dnReadQ", pPool->name);
+ char* threadname = strcmp(pPool->name, "vquery") == 0? "dnodeQueryQ":"dnodeFetchQ";
+
+ char name[16] = {0};
+ snprintf(name, tListLen(name), "%s", threadname);
setThreadName(name);
while (1) {
diff --git a/src/dnode/src/dnodeVnodes.c b/src/dnode/src/dnodeVnodes.c
index 8ea8e280de10f0657ad0b937fb78794175d8c20a..a5b0e9fe30e88f89af2e79af16602dac9500a305 100644
--- a/src/dnode/src/dnodeVnodes.c
+++ b/src/dnode/src/dnodeVnodes.c
@@ -90,7 +90,6 @@ static void *dnodeOpenVnode(void *param) {
char stepDesc[TSDB_STEP_DESC_LEN] = {0};
dDebug("thread:%d, start to open %d vnodes", pThread->threadIndex, pThread->vnodeNum);
-
setThreadName("dnodeOpenVnode");
for (int32_t v = 0; v < pThread->vnodeNum; ++v) {
diff --git a/src/inc/taos.h b/src/inc/taos.h
index ba53c1ca8f57632d4270213be8eb7a7dbecd4dd2..6fa30737e71e8f40cee817386ad4d2c26661777f 100644
--- a/src/inc/taos.h
+++ b/src/inc/taos.h
@@ -110,19 +110,18 @@ typedef struct TAOS_MULTI_BIND {
int num;
} TAOS_MULTI_BIND;
-
-
DLL_EXPORT TAOS_STMT *taos_stmt_init(TAOS *taos);
DLL_EXPORT int taos_stmt_prepare(TAOS_STMT *stmt, const char *sql, unsigned long length);
DLL_EXPORT int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags);
DLL_EXPORT int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name);
DLL_EXPORT int taos_stmt_set_sub_tbname(TAOS_STMT* stmt, const char* name);
+
DLL_EXPORT int taos_stmt_is_insert(TAOS_STMT *stmt, int *insert);
DLL_EXPORT int taos_stmt_num_params(TAOS_STMT *stmt, int *nums);
-int taos_stmt_get_param(TAOS_STMT *stmt, int idx, int *type, int *bytes);
+DLL_EXPORT int taos_stmt_get_param(TAOS_STMT *stmt, int idx, int *type, int *bytes);
DLL_EXPORT int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_BIND *bind);
-int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind);
-int taos_stmt_bind_single_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, int colIdx);
+DLL_EXPORT int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind);
+DLL_EXPORT int taos_stmt_bind_single_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, int colIdx);
DLL_EXPORT int taos_stmt_add_batch(TAOS_STMT *stmt);
DLL_EXPORT int taos_stmt_execute(TAOS_STMT *stmt);
DLL_EXPORT TAOS_RES * taos_stmt_use_result(TAOS_STMT *stmt);
@@ -141,7 +140,6 @@ DLL_EXPORT int taos_select_db(TAOS *taos, const char *db);
DLL_EXPORT int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields);
DLL_EXPORT void taos_stop_query(TAOS_RES *res);
DLL_EXPORT bool taos_is_null(TAOS_RES *res, int32_t row, int32_t col);
-
DLL_EXPORT int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows);
DLL_EXPORT int taos_validate_sql(TAOS *taos, const char *sql);
diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h
index ca8ad3cc09f99d06c1f0406bb7124a8aad6fef55..94be247b0d8163be09ffff981636258779b6bfca 100644
--- a/src/inc/taosdef.h
+++ b/src/inc/taosdef.h
@@ -307,7 +307,7 @@ do { \
#define TSDB_DEFAULT_WAL_LEVEL 1
#define TSDB_MIN_DB_UPDATE 0
-#define TSDB_MAX_DB_UPDATE 1
+#define TSDB_MAX_DB_UPDATE 2
#define TSDB_DEFAULT_DB_UPDATE_OPTION 0
#define TSDB_MIN_DB_CACHE_LAST_ROW 0
@@ -365,6 +365,7 @@ do { \
#define TSDB_QUERY_TYPE_MULTITABLE_QUERY 0x200u
#define TSDB_QUERY_TYPE_FILE_INSERT 0x400u // insert data from file
#define TSDB_QUERY_TYPE_STMT_INSERT 0x800u // stmt insert type
+#define TSDB_QUERY_TYPE_NEST_SUBQUERY 0x1000u // nested sub query
#define TSDB_QUERY_HAS_TYPE(x, _type) (((x) & (_type)) != 0)
#define TSDB_QUERY_SET_TYPE(x, _type) ((x) |= (_type))
@@ -435,6 +436,12 @@ typedef enum {
TSDB_CHECK_ITEM_MAX
} ECheckItemType;
+typedef enum {
+ TD_ROW_DISCARD_UPDATE = 0,
+ TD_ROW_OVERWRITE_UPDATE = 1,
+ TD_ROW_PARTIAL_UPDATE = 2
+} TDUpdateConfig;
+
extern char *qtypeStr[];
#ifdef __cplusplus
diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h
index f57e553e3f43053552e30a5191abbd7374032f9d..2214078f5587799ff4daea4f708e920a95e97fcf 100644
--- a/src/inc/taoserror.h
+++ b/src/inc/taoserror.h
@@ -102,6 +102,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_TSC_EXCEED_SQL_LIMIT TAOS_DEF_ERROR_CODE(0, 0x0219) //"SQL statement too long check maxSQLLength config")
#define TSDB_CODE_TSC_FILE_EMPTY TAOS_DEF_ERROR_CODE(0, 0x021A) //"File is empty")
#define TSDB_CODE_TSC_LINE_SYNTAX_ERROR TAOS_DEF_ERROR_CODE(0, 0x021B) //"Syntax error in Line")
+#define TSDB_CODE_TSC_NO_META_CACHED TAOS_DEF_ERROR_CODE(0, 0x021C) //"No table meta cached")
// mnode
#define TSDB_CODE_MND_MSG_NOT_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0300) //"Message not processed")
diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h
index 9ee241efc1840b522919ec123dff25ee625d9bff..fb5bbe6c2d2442376f8937820822f654e1b41163 100644
--- a/src/inc/taosmsg.h
+++ b/src/inc/taosmsg.h
@@ -809,7 +809,7 @@ typedef struct SMultiTableMeta {
int32_t contLen;
uint8_t compressed; // denote if compressed or not
uint32_t rawLen; // size before compress
- uint8_t metaClone; // make meta clone after retrieve meta from mnode
+ uint8_t metaClone; // make meta clone after retrieve meta from mnode
char meta[];
} SMultiTableMeta;
@@ -874,6 +874,10 @@ typedef struct {
int64_t useconds;
int64_t stime;
uint64_t qId;
+ uint64_t sqlObjId;
+ int32_t pid;
+ char fqdn[TSDB_FQDN_LEN];
+ int32_t numOfSub;
} SQueryDesc;
typedef struct {
diff --git a/src/inc/tcq.h b/src/inc/tcq.h
index 27c043f960c2b77609b59f2188bfa0fa67c4b3b1..7338cccfeee184e7f6834e41064e0c71fe5145a0 100644
--- a/src/inc/tcq.h
+++ b/src/inc/tcq.h
@@ -27,7 +27,7 @@ typedef struct {
int32_t vgId;
char user[TSDB_USER_LEN];
char pass[TSDB_KEY_LEN];
- char db[TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN];
+ char db[TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN]; // size must be the same as SVnodeObj.db[TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN]
FCqWrite cqWrite;
} SCqCfg;
diff --git a/src/inc/tsdb.h b/src/inc/tsdb.h
index 79d9029dbc1603fe145e8b12553e2d03c028ac60..d1972bdcead7fe931078c1bbdfd4c3b84841850a 100644
--- a/src/inc/tsdb.h
+++ b/src/inc/tsdb.h
@@ -111,7 +111,7 @@ typedef struct {
uint64_t superUid;
STSchema * schema;
STSchema * tagSchema;
- SDataRow tagValues;
+ SKVRow tagValues;
char * sql;
} STableCfg;
@@ -240,6 +240,7 @@ typedef struct {
int32_t minRows;
int32_t firstSeekTimeUs;
uint32_t numOfRowsInMemTable;
+ uint32_t numOfSmallBlocks;
SArray *dataBlockInfos;
} STableBlockDist;
diff --git a/src/inc/ttype.h b/src/inc/ttype.h
index 6e436bd23db180de61a2d07904c8890aa3581ff7..44e666106a7657691b0d97d259ccb7b61871b9a7 100644
--- a/src/inc/ttype.h
+++ b/src/inc/ttype.h
@@ -138,8 +138,10 @@ typedef struct {
#define IS_VALID_USMALLINT(_t) ((_t) >= 0 && (_t) < UINT16_MAX)
#define IS_VALID_UINT(_t) ((_t) >= 0 && (_t) < UINT32_MAX)
#define IS_VALID_UBIGINT(_t) ((_t) >= 0 && (_t) < UINT64_MAX)
+#define IS_VALID_FLOAT(_t) ((_t) >= -FLT_MAX && (_t) <= FLT_MAX)
+#define IS_VALID_DOUBLE(_t) ((_t) >= -DBL_MAX && (_t) <= DBL_MAX)
-static FORCE_INLINE bool isNull(const char *val, int32_t type) {
+static FORCE_INLINE bool isNull(const void *val, int32_t type) {
switch (type) {
case TSDB_DATA_TYPE_BOOL:
return *(uint8_t *)val == TSDB_DATA_BOOL_NULL;
@@ -191,9 +193,9 @@ extern tDataTypeDescriptor tDataTypes[15];
bool isValidDataType(int32_t type);
-void setVardataNull(char* val, int32_t type);
-void setNull(char *val, int32_t type, int32_t bytes);
-void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems);
+void setVardataNull(void* val, int32_t type);
+void setNull(void *val, int32_t type, int32_t bytes);
+void setNullN(void *val, int32_t type, int32_t bytes, int32_t numOfElems);
const void *getNullValue(int32_t type);
void assignVal(char *val, const char *src, int32_t len, int32_t type);
diff --git a/src/kit/shell/inc/shellCommand.h b/src/kit/shell/inc/shellCommand.h
index a08c1f48d11a8cd0e51fa5fb2d05a16da96d38c9..6e4d3e382e3d7e8c50405c07da8ed73725230434 100644
--- a/src/kit/shell/inc/shellCommand.h
+++ b/src/kit/shell/inc/shellCommand.h
@@ -35,6 +35,8 @@ struct Command {
};
extern void backspaceChar(Command *cmd);
+extern void clearLineBefore(Command *cmd);
+extern void clearLineAfter(Command *cmd);
extern void deleteChar(Command *cmd);
extern void moveCursorLeft(Command *cmd);
extern void moveCursorRight(Command *cmd);
diff --git a/src/kit/shell/src/shellCommand.c b/src/kit/shell/src/shellCommand.c
index e1a3dfe10205715d8c5cf8677a5be60c1a478b05..67e0c949890728268afcaf67804dd20e10231ba4 100644
--- a/src/kit/shell/src/shellCommand.c
+++ b/src/kit/shell/src/shellCommand.c
@@ -102,6 +102,28 @@ void backspaceChar(Command *cmd) {
}
}
+void clearLineBefore(Command *cmd) {
+ assert(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset);
+
+ clearScreen(cmd->endOffset + prompt_size, cmd->screenOffset + prompt_size);
+ memmove(cmd->command, cmd->command + cmd->cursorOffset,
+ cmd->commandSize - cmd->cursorOffset);
+ cmd->commandSize -= cmd->cursorOffset;
+ cmd->cursorOffset = 0;
+ cmd->screenOffset = 0;
+ cmd->endOffset = cmd->commandSize;
+ showOnScreen(cmd);
+}
+
+void clearLineAfter(Command *cmd) {
+ assert(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset);
+
+ clearScreen(cmd->endOffset + prompt_size, cmd->screenOffset + prompt_size);
+ cmd->commandSize -= cmd->endOffset - cmd->cursorOffset;
+ cmd->endOffset = cmd->cursorOffset;
+ showOnScreen(cmd);
+}
+
void deleteChar(Command *cmd) {
assert(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset);
diff --git a/src/kit/shell/src/shellDarwin.c b/src/kit/shell/src/shellDarwin.c
index 86c0fea5739ff624cada266e09920f875c24a69a..4dcd8b3d50628f626e681700e131ffb0a3b875e1 100644
--- a/src/kit/shell/src/shellDarwin.c
+++ b/src/kit/shell/src/shellDarwin.c
@@ -238,10 +238,16 @@ int32_t shellReadCommand(TAOS *con, char *command) {
updateBuffer(&cmd);
}
break;
+ case 11: // Ctrl + K;
+ clearLineAfter(&cmd);
+ break;
case 12: // Ctrl + L;
system("clear");
showOnScreen(&cmd);
break;
+ case 21: // Ctrl + U
+ clearLineBefore(&cmd);
+ break;
}
} else if (c == '\033') {
c = getchar();
diff --git a/src/kit/shell/src/shellLinux.c b/src/kit/shell/src/shellLinux.c
index 2a32a8d82e73794fbea930c174d6e51f9c194fc3..dc74f6fcaa152c547d734ed4e186b45b94ce8de5 100644
--- a/src/kit/shell/src/shellLinux.c
+++ b/src/kit/shell/src/shellLinux.c
@@ -238,10 +238,16 @@ int32_t shellReadCommand(TAOS *con, char *command) {
updateBuffer(&cmd);
}
break;
+ case 11: // Ctrl + K;
+ clearLineAfter(&cmd);
+ break;
case 12: // Ctrl + L;
system("clear");
showOnScreen(&cmd);
break;
+ case 21: // Ctrl + U;
+ clearLineBefore(&cmd);
+ break;
}
} else if (c == '\033') {
c = (char)getchar();
diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c
index 1630d6f89509cc12c5f49ee939869cd681b0e57a..5ac85f87f1a7f9055241cf8f813c75b9bc72f46a 100644
--- a/src/kit/taosdemo/taosdemo.c
+++ b/src/kit/taosdemo/taosdemo.c
@@ -16,7 +16,7 @@
/*
when in some thread query return error, thread don't exit, but return, otherwise coredump in other thread.
-*/
+ */
#include
#include
@@ -24,24 +24,24 @@
#define CURL_STATICLIB
#ifdef LINUX
- #include
- #include
- #ifndef _ALPINE
- #include
- #endif
- #include
- #include
- #include
- #include
- #include
- #include
- #include
- #include
- #include
- #include
+#include
+#include
+#ifndef _ALPINE
+#include
+#endif
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
#else
- #include
- #include
+#include
+#include
#endif
#include
@@ -66,10 +66,6 @@
extern char configDir[];
-#define INSERT_JSON_NAME "insert.json"
-#define QUERY_JSON_NAME "query.json"
-#define SUBSCRIBE_JSON_NAME "subscribe.json"
-
#define STR_INSERT_INTO "INSERT INTO "
#define MAX_RECORDS_PER_REQ 32766
@@ -81,13 +77,22 @@ extern char configDir[];
#define COL_BUFFER_LEN ((TSDB_COL_NAME_LEN + 15) * TSDB_MAX_COLUMNS)
#define MAX_USERNAME_SIZE 64
#define MAX_PASSWORD_SIZE 64
-#define MAX_HOSTNAME_SIZE 64
+#define MAX_HOSTNAME_SIZE 253 // https://man7.org/linux/man-pages/man7/hostname.7.html
#define MAX_TB_NAME_SIZE 64
#define MAX_DATA_SIZE (16*TSDB_MAX_COLUMNS)+20 // max record len: 16*MAX_COLUMNS, timestamp string and ,('') need extra space
#define OPT_ABORT 1 /* –abort */
-#define MAX_PREPARED_RAND 1000000
#define MAX_FILE_NAME_LEN 256 // max file name length on linux is 255.
+#define MAX_PREPARED_RAND 1000000
+#define INT_BUFF_LEN 11
+#define BIGINT_BUFF_LEN 21
+#define SMALLINT_BUFF_LEN 6
+#define TINYINT_BUFF_LEN 4
+#define BOOL_BUFF_LEN 6
+#define FLOAT_BUFF_LEN 22
+#define DOUBLE_BUFF_LEN 42
+#define TIMESTAMP_BUFF_LEN 21
+
#define MAX_SAMPLES_ONCE_FROM_FILE 10000
#define MAX_NUM_COLUMNS (TSDB_MAX_COLUMNS - 1) // exclude first column timestamp
@@ -95,11 +100,15 @@ extern char configDir[];
#define MAX_SUPER_TABLE_COUNT 200
#define MAX_QUERY_SQL_COUNT 100
-#define MAX_QUERY_SQL_LENGTH BUFFER_SIZE
#define MAX_DATABASE_COUNT 256
#define INPUT_BUF_LEN 256
+#define TBNAME_PREFIX_LEN (TSDB_TABLE_NAME_LEN - 20) // 20 characters reserved for seq
+#define SMALL_BUFF_LEN 8
+#define DATATYPE_BUFF_LEN (SMALL_BUFF_LEN*3)
+#define NOTE_BUFF_LEN (SMALL_BUFF_LEN*16)
+
#define DEFAULT_TIMESTAMP_STEP 1
@@ -225,6 +234,7 @@ typedef struct SArguments_S {
uint32_t num_of_CPR;
uint32_t num_of_threads;
uint64_t insert_interval;
+ uint64_t timestamp_step;
int64_t query_times;
uint32_t interlace_rows;
uint32_t num_of_RPR; // num_of_records_per_req
@@ -243,16 +253,15 @@ typedef struct SArguments_S {
typedef struct SColumn_S {
char field[TSDB_COL_NAME_LEN];
- char dataType[16];
+ char dataType[DATATYPE_BUFF_LEN];
uint32_t dataLen;
- char note[128];
+ char note[NOTE_BUFF_LEN];
} StrColumn;
typedef struct SSuperTable_S {
char sTblName[TSDB_TABLE_NAME_LEN];
- char dataSource[MAX_TB_NAME_SIZE]; // rand_gen or sample
- char childTblPrefix[TSDB_TABLE_NAME_LEN - 20]; // 20 characters reserved for seq
- char insertMode[MAX_TB_NAME_SIZE]; // taosc, rest
+ char dataSource[SMALL_BUFF_LEN]; // rand_gen or sample
+ char childTblPrefix[TBNAME_PREFIX_LEN];
uint16_t childTblExists;
int64_t childTblCount;
uint64_t batchCreateTableNum; // 0: no batch, > 0: batch table number in one sql
@@ -271,7 +280,7 @@ typedef struct SSuperTable_S {
int64_t insertRows;
int64_t timeStampStep;
char startTimestamp[MAX_TB_NAME_SIZE];
- char sampleFormat[MAX_TB_NAME_SIZE]; // csv, json
+ char sampleFormat[SMALL_BUFF_LEN]; // csv, json
char sampleFile[MAX_FILE_NAME_LEN];
char tagsFile[MAX_FILE_NAME_LEN];
@@ -307,7 +316,7 @@ typedef struct {
int16_t replica;
int16_t quorum;
int16_t days;
- char keeplist[32];
+ char keeplist[64];
int32_t cache; //MB
int32_t blocks;
int32_t minrows;
@@ -316,7 +325,7 @@ typedef struct {
int32_t fsync;
int8_t comp;
int8_t cachelast;
- char precision[8]; // time resolution
+ char precision[SMALL_BUFF_LEN]; // time resolution
int8_t update;
char status[16];
} SDbInfo;
@@ -336,7 +345,7 @@ typedef struct SDbCfg_S {
int cache;
int blocks;
int quorum;
- char precision[8];
+ char precision[SMALL_BUFF_LEN];
} SDbCfg;
typedef struct SDataBase_S {
@@ -381,7 +390,7 @@ typedef struct SpecifiedQueryInfo_S {
uint64_t queryTimes;
bool subscribeRestart;
int subscribeKeepProgress;
- char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1];
+ char sql[MAX_QUERY_SQL_COUNT][BUFFER_SIZE+1];
char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN];
int resubAfterConsume[MAX_QUERY_SQL_COUNT];
int endAfterConsume[MAX_QUERY_SQL_COUNT];
@@ -402,9 +411,9 @@ typedef struct SuperQueryInfo_S {
int subscribeKeepProgress;
uint64_t queryTimes;
int64_t childTblCount;
- char childTblPrefix[TSDB_TABLE_NAME_LEN - 20]; // 20 characters reserved for seq
+ char childTblPrefix[TBNAME_PREFIX_LEN]; // 20 characters reserved for seq
int sqlCount;
- char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1];
+ char sql[MAX_QUERY_SQL_COUNT][BUFFER_SIZE+1];
char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN];
int resubAfterConsume;
int endAfterConsume;
@@ -422,7 +431,7 @@ typedef struct SQueryMetaInfo_S {
char user[MAX_USERNAME_SIZE];
char password[MAX_PASSWORD_SIZE];
char dbName[TSDB_DB_NAME_LEN];
- char queryMode[MAX_TB_NAME_SIZE]; // taosc, rest
+ char queryMode[SMALL_BUFF_LEN]; // taosc, rest
SpecifiedQueryInfo specifiedQueryInfo;
SuperQueryInfo superQueryInfo;
@@ -485,42 +494,42 @@ typedef unsigned __int32 uint32_t;
#pragma comment ( lib, "ws2_32.lib" )
// Some old MinGW/CYGWIN distributions don't define this:
#ifndef ENABLE_VIRTUAL_TERMINAL_PROCESSING
- #define ENABLE_VIRTUAL_TERMINAL_PROCESSING 0x0004
+#define ENABLE_VIRTUAL_TERMINAL_PROCESSING 0x0004
#endif // ENABLE_VIRTUAL_TERMINAL_PROCESSING
static HANDLE g_stdoutHandle;
static DWORD g_consoleMode;
static void setupForAnsiEscape(void) {
- DWORD mode = 0;
- g_stdoutHandle = GetStdHandle(STD_OUTPUT_HANDLE);
+ DWORD mode = 0;
+ g_stdoutHandle = GetStdHandle(STD_OUTPUT_HANDLE);
- if(g_stdoutHandle == INVALID_HANDLE_VALUE) {
- exit(GetLastError());
- }
+ if(g_stdoutHandle == INVALID_HANDLE_VALUE) {
+ exit(GetLastError());
+ }
- if(!GetConsoleMode(g_stdoutHandle, &mode)) {
- exit(GetLastError());
- }
+ if(!GetConsoleMode(g_stdoutHandle, &mode)) {
+ exit(GetLastError());
+ }
- g_consoleMode = mode;
+ g_consoleMode = mode;
- // Enable ANSI escape codes
- mode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING;
+ // Enable ANSI escape codes
+ mode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING;
- if(!SetConsoleMode(g_stdoutHandle, mode)) {
- exit(GetLastError());
- }
+ if(!SetConsoleMode(g_stdoutHandle, mode)) {
+ exit(GetLastError());
+ }
}
static void resetAfterAnsiEscape(void) {
- // Reset colors
- printf("\x1b[0m");
+ // Reset colors
+ printf("\x1b[0m");
- // Reset console mode
- if(!SetConsoleMode(g_stdoutHandle, g_consoleMode)) {
- exit(GetLastError());
- }
+ // Reset console mode
+ if(!SetConsoleMode(g_stdoutHandle, g_consoleMode)) {
+ exit(GetLastError());
+ }
}
static int taosRandom()
@@ -534,15 +543,15 @@ static int taosRandom()
static void setupForAnsiEscape(void) {}
static void resetAfterAnsiEscape(void) {
- // Reset colors
- printf("\x1b[0m");
+ // Reset colors
+ printf("\x1b[0m");
}
#include
static int taosRandom()
{
- return rand();
+ return rand();
}
#endif // ifdef Windows
@@ -560,11 +569,23 @@ static void init_rand_data();
/* ************ Global variables ************ */
-int32_t randint[MAX_PREPARED_RAND];
-int64_t randbigint[MAX_PREPARED_RAND];
-float randfloat[MAX_PREPARED_RAND];
-double randdouble[MAX_PREPARED_RAND];
-char *aggreFunc[] = {"*", "count(*)", "avg(col0)", "sum(col0)",
+int32_t g_randint[MAX_PREPARED_RAND];
+int64_t g_randbigint[MAX_PREPARED_RAND];
+float g_randfloat[MAX_PREPARED_RAND];
+double g_randdouble[MAX_PREPARED_RAND];
+
+char *g_randbool_buff = NULL;
+char *g_randint_buff = NULL;
+char *g_rand_voltage_buff = NULL;
+char *g_randbigint_buff = NULL;
+char *g_randsmallint_buff = NULL;
+char *g_randtinyint_buff = NULL;
+char *g_randfloat_buff = NULL;
+char *g_rand_current_buff = NULL;
+char *g_rand_phase_buff = NULL;
+char *g_randdouble_buff = NULL;
+
+char *g_aggreFunc[] = {"*", "count(*)", "avg(col0)", "sum(col0)",
"max(col0)", "min(col0)", "first(col0)", "last(col0)"};
#define DEFAULT_DATATYPE_NUM 3
@@ -605,6 +626,7 @@ SArguments g_args = {
4, // num_of_CPR
10, // num_of_connections/thread
0, // insert_interval
+ DEFAULT_TIMESTAMP_STEP, // timestamp_step
1, // query_times
0, // interlace_rows;
30000, // num_of_RPR
@@ -634,7 +656,7 @@ static FILE * g_fpOfInsertResult = NULL;
#define debugPrint(fmt, ...) \
do { if (g_args.debug_print || g_args.verbose_print) \
- fprintf(stderr, "DEBG: "fmt, __VA_ARGS__); } while(0)
+ fprintf(stderr, "DEBG: "fmt, __VA_ARGS__); } while(0)
#define verbosePrint(fmt, ...) \
do { if (g_args.verbose_print) \
@@ -645,7 +667,7 @@ static FILE * g_fpOfInsertResult = NULL;
fprintf(stderr, "PERF: "fmt, __VA_ARGS__); } while(0)
#define errorPrint(fmt, ...) \
- do { fprintf(stderr, "ERROR: "fmt, __VA_ARGS__); } while(0)
+ do { fprintf(stderr, " \033[31m"); fprintf(stderr, "ERROR: "fmt, __VA_ARGS__); fprintf(stderr, " \033[0m"); } while(0)
// for strncpy buffer overflow
#define min(a, b) (((a) < (b)) ? (a) : (b))
@@ -740,6 +762,9 @@ static void printHelp() {
"The number of threads. Default is 10.");
printf("%s%s%s%s\n", indent, "-i", indent,
"The sleep time (ms) between insertion. Default is 0.");
+ printf("%s%s%s%s%d.\n", indent, "-S", indent,
+ "The timestamp step between insertion. Default is ",
+ DEFAULT_TIMESTAMP_STEP);
printf("%s%s%s%s\n", indent, "-r", indent,
"The number of records per request. Default is 30000.");
printf("%s%s%s%s\n", indent, "-t", indent,
@@ -881,6 +906,14 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
exit(EXIT_FAILURE);
}
arguments->insert_interval = atoi(argv[++i]);
+ } else if (strcmp(argv[i], "-S") == 0) {
+ if ((argc == i+1) ||
+ (!isStringNumber(argv[i+1]))) {
+ printHelp();
+ errorPrint("\n\t%s%s", argv[i], " need a number following!\n");
+ exit(EXIT_FAILURE);
+ }
+ arguments->timestamp_step = atoi(argv[++i]);
} else if (strcmp(argv[i], "-qt") == 0) {
if ((argc == i+1)
|| (!isStringNumber(argv[i+1]))) {
@@ -1239,14 +1272,14 @@ static void fetchResult(TAOS_RES *res, threadInfo* pThreadInfo) {
// fetch the records row by row
while((row = taos_fetch_row(res))) {
- if (totalLen >= 100*1024*1024 - 32000) {
+ if (totalLen >= (100*1024*1024 - HEAD_BUFF_LEN*2)) {
if (strlen(pThreadInfo->filePath) > 0)
appendResultBufToFile(databuf, pThreadInfo);
totalLen = 0;
memset(databuf, 0, 100*1024*1024);
}
num_rows++;
- char temp[16000] = {0};
+ char temp[HEAD_BUFF_LEN] = {0};
int len = taos_print_row(temp, row, fields, num_fields);
len += sprintf(temp + len, "\n");
//printf("query result:%s\n", temp);
@@ -1294,67 +1327,144 @@ static void selectAndGetResult(
}
}
+static char *rand_bool_str(){
+ static int cursor;
+ cursor++;
+ if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
+ return g_randbool_buff + (cursor * BOOL_BUFF_LEN);
+}
+
static int32_t rand_bool(){
static int cursor;
cursor++;
cursor = cursor % MAX_PREPARED_RAND;
- return randint[cursor] % 2;
+ return g_randint[cursor] % 2;
+}
+
+static char *rand_tinyint_str()
+{
+ static int cursor;
+ cursor++;
+ if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
+ return g_randtinyint_buff + (cursor * TINYINT_BUFF_LEN);
}
-static int32_t rand_tinyint(){
+static int32_t rand_tinyint()
+{
static int cursor;
cursor++;
cursor = cursor % MAX_PREPARED_RAND;
- return randint[cursor] % 128;
+ return g_randint[cursor] % 128;
+}
+
+static char *rand_smallint_str()
+{
+ static int cursor;
+ cursor++;
+ if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
+ return g_randsmallint_buff + (cursor * SMALLINT_BUFF_LEN);
}
-static int32_t rand_smallint(){
+static int32_t rand_smallint()
+{
static int cursor;
cursor++;
cursor = cursor % MAX_PREPARED_RAND;
- return randint[cursor] % 32767;
+ return g_randint[cursor] % 32767;
}
-static int32_t rand_int(){
+static char *rand_int_str()
+{
+ static int cursor;
+ cursor++;
+ if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
+ return g_randint_buff + (cursor * INT_BUFF_LEN);
+}
+
+static int32_t rand_int()
+{
static int cursor;
cursor++;
cursor = cursor % MAX_PREPARED_RAND;
- return randint[cursor];
+ return g_randint[cursor];
+}
+
+static char *rand_bigint_str()
+{
+ static int cursor;
+ cursor++;
+ if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
+ return g_randbigint_buff + (cursor * BIGINT_BUFF_LEN);
}
-static int64_t rand_bigint(){
+static int64_t rand_bigint()
+{
static int cursor;
cursor++;
cursor = cursor % MAX_PREPARED_RAND;
- return randbigint[cursor];
+ return g_randbigint[cursor];
+}
+
+static char *rand_float_str()
+{
+ static int cursor;
+ cursor++;
+ if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
+ return g_randfloat_buff + (cursor * FLOAT_BUFF_LEN);
}
-static float rand_float(){
+static float rand_float()
+{
static int cursor;
cursor++;
cursor = cursor % MAX_PREPARED_RAND;
- return randfloat[cursor];
+ return g_randfloat[cursor];
+}
+
+static char *demo_current_float_str()
+{
+ static int cursor;
+ cursor++;
+ if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
+ return g_rand_current_buff + (cursor * FLOAT_BUFF_LEN);
}
-static float demo_current_float(){
+static float UNUSED_FUNC demo_current_float()
+{
static int cursor;
cursor++;
cursor = cursor % MAX_PREPARED_RAND;
- return (float)(9.8 + 0.04 * (randint[cursor] % 10) + randfloat[cursor]/1000000000);
+ return (float)(9.8 + 0.04 * (g_randint[cursor] % 10) + g_randfloat[cursor]/1000000000);
}
-static int32_t demo_voltage_int(){
+static char *demo_voltage_int_str()
+{
+ static int cursor;
+ cursor++;
+ if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
+ return g_rand_voltage_buff + (cursor * INT_BUFF_LEN);
+}
+
+static int32_t UNUSED_FUNC demo_voltage_int()
+{
static int cursor;
cursor++;
cursor = cursor % MAX_PREPARED_RAND;
- return 215 + randint[cursor] % 10;
+ return 215 + g_randint[cursor] % 10;
+}
+
+static char *demo_phase_float_str() {
+ static int cursor;
+ cursor++;
+ if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
+ return g_rand_phase_buff + (cursor * FLOAT_BUFF_LEN);
}
-static float demo_phase_float(){
+static float UNUSED_FUNC demo_phase_float(){
static int cursor;
cursor++;
cursor = cursor % MAX_PREPARED_RAND;
- return (float)((115 + randint[cursor] % 10 + randfloat[cursor]/1000000000)/360);
+ return (float)((115 + g_randint[cursor] % 10 + g_randfloat[cursor]/1000000000)/360);
}
#if 0
@@ -1363,14 +1473,14 @@ static const char charNum[] = "0123456789";
static void nonrand_string(char *, int) __attribute__ ((unused)); // reserve for debugging purpose
static void nonrand_string(char *str, int size)
{
- str[0] = 0;
- if (size > 0) {
- int n;
- for (n = 0; n < size; n++) {
- str[n] = charNum[n % 10];
- }
- str[n] = 0;
- }
+ str[0] = 0;
+ if (size > 0) {
+ int n;
+ for (n = 0; n < size; n++) {
+ str[n] = charNum[n % 10];
+ }
+ str[n] = 0;
+ }
}
#endif
@@ -1389,19 +1499,76 @@ static void rand_string(char *str, int size) {
}
}
-static double rand_double() {
+static char *rand_double_str()
+{
+ static int cursor;
+ cursor++;
+ if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
+ return g_randdouble_buff + (cursor * DOUBLE_BUFF_LEN);
+}
+
+static double rand_double()
+{
static int cursor;
cursor++;
cursor = cursor % MAX_PREPARED_RAND;
- return randdouble[cursor];
+ return g_randdouble[cursor];
}
static void init_rand_data() {
+
+ g_randint_buff = calloc(1, INT_BUFF_LEN * MAX_PREPARED_RAND);
+ assert(g_randint_buff);
+ g_rand_voltage_buff = calloc(1, INT_BUFF_LEN * MAX_PREPARED_RAND);
+ assert(g_rand_voltage_buff);
+ g_randbigint_buff = calloc(1, BIGINT_BUFF_LEN * MAX_PREPARED_RAND);
+ assert(g_randbigint_buff);
+ g_randsmallint_buff = calloc(1, SMALLINT_BUFF_LEN * MAX_PREPARED_RAND);
+ assert(g_randsmallint_buff);
+ g_randtinyint_buff = calloc(1, TINYINT_BUFF_LEN * MAX_PREPARED_RAND);
+ assert(g_randtinyint_buff);
+ g_randbool_buff = calloc(1, BOOL_BUFF_LEN * MAX_PREPARED_RAND);
+ assert(g_randbool_buff);
+ g_randfloat_buff = calloc(1, FLOAT_BUFF_LEN * MAX_PREPARED_RAND);
+ assert(g_randfloat_buff);
+ g_rand_current_buff = calloc(1, FLOAT_BUFF_LEN * MAX_PREPARED_RAND);
+ assert(g_rand_current_buff);
+ g_rand_phase_buff = calloc(1, FLOAT_BUFF_LEN * MAX_PREPARED_RAND);
+ assert(g_rand_phase_buff);
+ g_randdouble_buff = calloc(1, DOUBLE_BUFF_LEN * MAX_PREPARED_RAND);
+ assert(g_randdouble_buff);
+
for (int i = 0; i < MAX_PREPARED_RAND; i++){
- randint[i] = (int)(taosRandom() % 65535);
- randbigint[i] = (int64_t)(taosRandom() % 2147483648);
- randfloat[i] = (float)(taosRandom() / 1000.0);
- randdouble[i] = (double)(taosRandom() / 1000000.0);
+ g_randint[i] = (int)(taosRandom() % 65535);
+ sprintf(g_randint_buff + i * INT_BUFF_LEN, "%d",
+ g_randint[i]);
+ sprintf(g_rand_voltage_buff + i * INT_BUFF_LEN, "%d",
+ 215 + g_randint[i] % 10);
+
+ sprintf(g_randbool_buff + i * BOOL_BUFF_LEN, "%s",
+ ((g_randint[i] % 2) & 1)?"true":"false");
+ sprintf(g_randsmallint_buff + i * SMALLINT_BUFF_LEN, "%d",
+ g_randint[i] % 32767);
+ sprintf(g_randtinyint_buff + i * TINYINT_BUFF_LEN, "%d",
+ g_randint[i] % 128);
+
+ g_randbigint[i] = (int64_t)(taosRandom() % 2147483648);
+ sprintf(g_randbigint_buff + i * BIGINT_BUFF_LEN, "%"PRId64"",
+ g_randbigint[i]);
+
+ g_randfloat[i] = (float)(taosRandom() / 1000.0);
+ sprintf(g_randfloat_buff + i * FLOAT_BUFF_LEN, "%f",
+ g_randfloat[i]);
+ sprintf(g_rand_current_buff + i * FLOAT_BUFF_LEN, "%f",
+ (float)(9.8 + 0.04 * (g_randint[i] % 10)
+ + g_randfloat[i]/1000000000));
+ sprintf(g_rand_phase_buff + i * FLOAT_BUFF_LEN, "%f",
+ (float)((115 + g_randint[i] % 10
+ + g_randfloat[i]/1000000000)/360));
+
+ g_randdouble[i] = (double)(taosRandom() / 1000000.0);
+ sprintf(g_randdouble_buff + i * DOUBLE_BUFF_LEN, "%f",
+ g_randdouble[i]);
}
}
@@ -1436,8 +1603,8 @@ static int printfInsertMeta() {
if (g_args.iface != INTERFACE_BUT) {
// first time if no iface specified
printf("interface: \033[33m%s\033[0m\n",
- (g_args.iface==TAOSC_IFACE)?"taosc":
- (g_args.iface==REST_IFACE)?"rest":"stmt");
+ (g_args.iface==TAOSC_IFACE)?"taosc":
+ (g_args.iface==REST_IFACE)?"rest":"stmt");
}
printf("host: \033[33m%s:%u\033[0m\n",
@@ -2151,15 +2318,15 @@ static void printfDbInfoForQueryToFile(
}
static void printfQuerySystemInfo(TAOS * taos) {
- char filename[MAX_QUERY_SQL_LENGTH+1] = {0};
- char buffer[MAX_QUERY_SQL_LENGTH+1] = {0};
+ char filename[BUFFER_SIZE+1] = {0};
+ char buffer[BUFFER_SIZE+1] = {0};
TAOS_RES* res;
time_t t;
struct tm* lt;
time(&t);
lt = localtime(&t);
- snprintf(filename, MAX_QUERY_SQL_LENGTH, "querySystemInfo-%d-%d-%d %d:%d:%d",
+ snprintf(filename, BUFFER_SIZE, "querySystemInfo-%d-%d-%d %d:%d:%d",
lt->tm_year+1900, lt->tm_mon, lt->tm_mday, lt->tm_hour, lt->tm_min,
lt->tm_sec);
@@ -2191,12 +2358,12 @@ static void printfQuerySystemInfo(TAOS * taos) {
printfDbInfoForQueryToFile(filename, dbInfos[i], i);
// show db.vgroups
- snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.vgroups;", dbInfos[i]->name);
+ snprintf(buffer, BUFFER_SIZE, "show %s.vgroups;", dbInfos[i]->name);
res = taos_query(taos, buffer);
xDumpResultToFile(filename, res);
// show db.stables
- snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.stables;", dbInfos[i]->name);
+ snprintf(buffer, BUFFER_SIZE, "show %s.stables;", dbInfos[i]->name);
res = taos_query(taos, buffer);
xDumpResultToFile(filename, res);
free(dbInfos[i]);
@@ -2221,24 +2388,24 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port
request_buf = malloc(req_buf_len);
if (NULL == request_buf) {
- errorPrint("%s", "ERROR, cannot allocate memory.\n");
- exit(EXIT_FAILURE);
+ errorPrint("%s", "ERROR, cannot allocate memory.\n");
+ exit(EXIT_FAILURE);
}
char userpass_buf[INPUT_BUF_LEN];
int mod_table[] = {0, 2, 1};
static char base64[] = {'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
- 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P',
- 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X',
- 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f',
- 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n',
- 'o', 'p', 'q', 'r', 's', 't', 'u', 'v',
- 'w', 'x', 'y', 'z', '0', '1', '2', '3',
- '4', '5', '6', '7', '8', '9', '+', '/'};
+ 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P',
+ 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X',
+ 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f',
+ 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n',
+ 'o', 'p', 'q', 'r', 's', 't', 'u', 'v',
+ 'w', 'x', 'y', 'z', '0', '1', '2', '3',
+ '4', '5', '6', '7', '8', '9', '+', '/'};
snprintf(userpass_buf, INPUT_BUF_LEN, "%s:%s",
- g_Dbs.user, g_Dbs.password);
+ g_Dbs.user, g_Dbs.password);
size_t userpass_buf_len = strlen(userpass_buf);
size_t encoded_len = 4 * ((userpass_buf_len +2) / 3);
@@ -2270,22 +2437,22 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port
memset(base64_buf, 0, INPUT_BUF_LEN);
for (int n = 0, m = 0; n < userpass_buf_len;) {
- uint32_t oct_a = n < userpass_buf_len ?
- (unsigned char) userpass_buf[n++]:0;
- uint32_t oct_b = n < userpass_buf_len ?
- (unsigned char) userpass_buf[n++]:0;
- uint32_t oct_c = n < userpass_buf_len ?
- (unsigned char) userpass_buf[n++]:0;
- uint32_t triple = (oct_a << 0x10) + (oct_b << 0x08) + oct_c;
+ uint32_t oct_a = n < userpass_buf_len ?
+ (unsigned char) userpass_buf[n++]:0;
+ uint32_t oct_b = n < userpass_buf_len ?
+ (unsigned char) userpass_buf[n++]:0;
+ uint32_t oct_c = n < userpass_buf_len ?
+ (unsigned char) userpass_buf[n++]:0;
+ uint32_t triple = (oct_a << 0x10) + (oct_b << 0x08) + oct_c;
- base64_buf[m++] = base64[(triple >> 3* 6) & 0x3f];
- base64_buf[m++] = base64[(triple >> 2* 6) & 0x3f];
- base64_buf[m++] = base64[(triple >> 1* 6) & 0x3f];
- base64_buf[m++] = base64[(triple >> 0* 6) & 0x3f];
+ base64_buf[m++] = base64[(triple >> 3* 6) & 0x3f];
+ base64_buf[m++] = base64[(triple >> 2* 6) & 0x3f];
+ base64_buf[m++] = base64[(triple >> 1* 6) & 0x3f];
+ base64_buf[m++] = base64[(triple >> 0* 6) & 0x3f];
}
for (int l = 0; l < mod_table[userpass_buf_len % 3]; l++)
- base64_buf[encoded_len - 1 - l] = '=';
+ base64_buf[encoded_len - 1 - l] = '=';
debugPrint("%s() LN%d: auth string base64 encoded: %s\n",
__func__, __LINE__, base64_buf);
@@ -2343,7 +2510,7 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port
printf("Response:\n%s\n", response_buf);
if (strlen(pThreadInfo->filePath) > 0) {
- appendResultBufToFile(response_buf, pThreadInfo);
+ appendResultBufToFile(response_buf, pThreadInfo);
}
free(request_buf);
@@ -2425,11 +2592,11 @@ static char* generateTagValuesForStb(SSuperTable* stbInfo, int64_t tableSeq) {
if ((g_args.demo_mode) && (i == 0)) {
dataLen += snprintf(dataBuf + dataLen,
TSDB_MAX_SQL_LEN - dataLen,
- "%"PRId64",", tableSeq % 10);
+ "%"PRId64",", tableSeq % 10);
} else {
dataLen += snprintf(dataBuf + dataLen,
TSDB_MAX_SQL_LEN - dataLen,
- "%"PRId64",", tableSeq);
+ "%"PRId64",", tableSeq);
}
} else if (0 == strncasecmp(stbInfo->tags[i].dataType,
"bigint", strlen("bigint"))) {
@@ -2472,72 +2639,72 @@ static char* generateTagValuesForStb(SSuperTable* stbInfo, int64_t tableSeq) {
}
static int calcRowLen(SSuperTable* superTbls) {
- int colIndex;
- int lenOfOneRow = 0;
-
- for (colIndex = 0; colIndex < superTbls->columnCount; colIndex++) {
- char* dataType = superTbls->columns[colIndex].dataType;
-
- if (strcasecmp(dataType, "BINARY") == 0) {
- lenOfOneRow += superTbls->columns[colIndex].dataLen + 3;
- } else if (strcasecmp(dataType, "NCHAR") == 0) {
- lenOfOneRow += superTbls->columns[colIndex].dataLen + 3;
- } else if (strcasecmp(dataType, "INT") == 0) {
- lenOfOneRow += 11;
- } else if (strcasecmp(dataType, "BIGINT") == 0) {
- lenOfOneRow += 21;
- } else if (strcasecmp(dataType, "SMALLINT") == 0) {
- lenOfOneRow += 6;
- } else if (strcasecmp(dataType, "TINYINT") == 0) {
- lenOfOneRow += 4;
- } else if (strcasecmp(dataType, "BOOL") == 0) {
- lenOfOneRow += 6;
- } else if (strcasecmp(dataType, "FLOAT") == 0) {
- lenOfOneRow += 22;
- } else if (strcasecmp(dataType, "DOUBLE") == 0) {
- lenOfOneRow += 42;
- } else if (strcasecmp(dataType, "TIMESTAMP") == 0) {
- lenOfOneRow += 21;
- } else {
- printf("get error data type : %s\n", dataType);
- exit(-1);
- }
- }
-
- superTbls->lenOfOneRow = lenOfOneRow + 20; // timestamp
-
- int tagIndex;
- int lenOfTagOfOneRow = 0;
- for (tagIndex = 0; tagIndex < superTbls->tagCount; tagIndex++) {
- char* dataType = superTbls->tags[tagIndex].dataType;
-
- if (strcasecmp(dataType, "BINARY") == 0) {
- lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3;
- } else if (strcasecmp(dataType, "NCHAR") == 0) {
- lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3;
- } else if (strcasecmp(dataType, "INT") == 0) {
- lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 11;
- } else if (strcasecmp(dataType, "BIGINT") == 0) {
- lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 21;
- } else if (strcasecmp(dataType, "SMALLINT") == 0) {
- lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 6;
- } else if (strcasecmp(dataType, "TINYINT") == 0) {
- lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 4;
- } else if (strcasecmp(dataType, "BOOL") == 0) {
- lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 6;
- } else if (strcasecmp(dataType, "FLOAT") == 0) {
- lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 22;
- } else if (strcasecmp(dataType, "DOUBLE") == 0) {
- lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 42;
- } else {
- printf("get error tag type : %s\n", dataType);
- exit(-1);
+ int colIndex;
+ int lenOfOneRow = 0;
+
+ for (colIndex = 0; colIndex < superTbls->columnCount; colIndex++) {
+ char* dataType = superTbls->columns[colIndex].dataType;
+
+ if (strcasecmp(dataType, "BINARY") == 0) {
+ lenOfOneRow += superTbls->columns[colIndex].dataLen + 3;
+ } else if (strcasecmp(dataType, "NCHAR") == 0) {
+ lenOfOneRow += superTbls->columns[colIndex].dataLen + 3;
+ } else if (strcasecmp(dataType, "INT") == 0) {
+ lenOfOneRow += INT_BUFF_LEN;
+ } else if (strcasecmp(dataType, "BIGINT") == 0) {
+ lenOfOneRow += BIGINT_BUFF_LEN;
+ } else if (strcasecmp(dataType, "SMALLINT") == 0) {
+ lenOfOneRow += SMALLINT_BUFF_LEN;
+ } else if (strcasecmp(dataType, "TINYINT") == 0) {
+ lenOfOneRow += TINYINT_BUFF_LEN;
+ } else if (strcasecmp(dataType, "BOOL") == 0) {
+ lenOfOneRow += BOOL_BUFF_LEN;
+ } else if (strcasecmp(dataType, "FLOAT") == 0) {
+ lenOfOneRow += FLOAT_BUFF_LEN;
+ } else if (strcasecmp(dataType, "DOUBLE") == 0) {
+ lenOfOneRow += DOUBLE_BUFF_LEN;
+ } else if (strcasecmp(dataType, "TIMESTAMP") == 0) {
+ lenOfOneRow += TIMESTAMP_BUFF_LEN;
+ } else {
+ printf("get error data type : %s\n", dataType);
+ exit(-1);
+ }
+ }
+
+ superTbls->lenOfOneRow = lenOfOneRow + 20; // timestamp
+
+ int tagIndex;
+ int lenOfTagOfOneRow = 0;
+ for (tagIndex = 0; tagIndex < superTbls->tagCount; tagIndex++) {
+ char* dataType = superTbls->tags[tagIndex].dataType;
+
+ if (strcasecmp(dataType, "BINARY") == 0) {
+ lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3;
+ } else if (strcasecmp(dataType, "NCHAR") == 0) {
+ lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3;
+ } else if (strcasecmp(dataType, "INT") == 0) {
+ lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + INT_BUFF_LEN;
+ } else if (strcasecmp(dataType, "BIGINT") == 0) {
+ lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + BIGINT_BUFF_LEN;
+ } else if (strcasecmp(dataType, "SMALLINT") == 0) {
+ lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + SMALLINT_BUFF_LEN;
+ } else if (strcasecmp(dataType, "TINYINT") == 0) {
+ lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + TINYINT_BUFF_LEN;
+ } else if (strcasecmp(dataType, "BOOL") == 0) {
+ lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + BOOL_BUFF_LEN;
+ } else if (strcasecmp(dataType, "FLOAT") == 0) {
+ lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + FLOAT_BUFF_LEN;
+ } else if (strcasecmp(dataType, "DOUBLE") == 0) {
+ lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + DOUBLE_BUFF_LEN;
+ } else {
+ printf("get error tag type : %s\n", dataType);
+ exit(-1);
+ }
}
- }
- superTbls->lenOfTagOfOneRow = lenOfTagOfOneRow;
+ superTbls->lenOfTagOfOneRow = lenOfTagOfOneRow;
- return 0;
+ return 0;
}
@@ -2545,84 +2712,84 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
char* dbName, char* sTblName, char** childTblNameOfSuperTbl,
int64_t* childTblCountOfSuperTbl, int64_t limit, uint64_t offset) {
- char command[BUFFER_SIZE] = "\0";
- char limitBuf[100] = "\0";
+ char command[BUFFER_SIZE] = "\0";
+ char limitBuf[100] = "\0";
- TAOS_RES * res;
- TAOS_ROW row = NULL;
+ TAOS_RES * res;
+ TAOS_ROW row = NULL;
- char* childTblName = *childTblNameOfSuperTbl;
+ char* childTblName = *childTblNameOfSuperTbl;
- if (offset >= 0) {
- snprintf(limitBuf, 100, " limit %"PRId64" offset %"PRIu64"",
- limit, offset);
- }
+ if (offset >= 0) {
+ snprintf(limitBuf, 100, " limit %"PRId64" offset %"PRIu64"",
+ limit, offset);
+ }
- //get all child table name use cmd: select tbname from superTblName;
- snprintf(command, BUFFER_SIZE, "select tbname from %s.%s %s",
- dbName, sTblName, limitBuf);
+ //get all child table name use cmd: select tbname from superTblName;
+ snprintf(command, BUFFER_SIZE, "select tbname from %s.%s %s",
+ dbName, sTblName, limitBuf);
- res = taos_query(taos, command);
- int32_t code = taos_errno(res);
- if (code != 0) {
- taos_free_result(res);
- taos_close(taos);
- errorPrint("%s() LN%d, failed to run command %s\n",
- __func__, __LINE__, command);
- exit(-1);
- }
-
- int64_t childTblCount = (limit < 0)?10000:limit;
- int64_t count = 0;
- if (childTblName == NULL) {
- childTblName = (char*)calloc(1, childTblCount * TSDB_TABLE_NAME_LEN);
- if (NULL == childTblName) {
- taos_free_result(res);
+ res = taos_query(taos, command);
+ int32_t code = taos_errno(res);
+ if (code != 0) {
+ taos_free_result(res);
taos_close(taos);
- errorPrint("%s() LN%d, failed to allocate memory!\n", __func__, __LINE__);
+ errorPrint("%s() LN%d, failed to run command %s\n",
+ __func__, __LINE__, command);
exit(-1);
}
- }
-
- char* pTblName = childTblName;
- while((row = taos_fetch_row(res)) != NULL) {
- int32_t* len = taos_fetch_lengths(res);
- if (0 == strlen((char *)row[0])) {
- errorPrint("%s() LN%d, No.%"PRId64" table return empty name\n",
- __func__, __LINE__, count);
- exit(-1);
+ int64_t childTblCount = (limit < 0)?10000:limit;
+ int64_t count = 0;
+ if (childTblName == NULL) {
+ childTblName = (char*)calloc(1, childTblCount * TSDB_TABLE_NAME_LEN);
+ if (NULL == childTblName) {
+ taos_free_result(res);
+ taos_close(taos);
+ errorPrint("%s() LN%d, failed to allocate memory!\n", __func__, __LINE__);
+ exit(-1);
+ }
}
- tstrncpy(pTblName, (char *)row[0], len[0]+1);
- //printf("==== sub table name: %s\n", pTblName);
- count++;
- if (count >= childTblCount - 1) {
- char *tmp = realloc(childTblName,
- (size_t)childTblCount*1.5*TSDB_TABLE_NAME_LEN+1);
- if (tmp != NULL) {
- childTblName = tmp;
- childTblCount = (int)(childTblCount*1.5);
- memset(childTblName + count*TSDB_TABLE_NAME_LEN, 0,
- (size_t)((childTblCount-count)*TSDB_TABLE_NAME_LEN));
- } else {
- // exit, if allocate more memory failed
- errorPrint("%s() LN%d, realloc fail for save child table name of %s.%s\n",
- __func__, __LINE__, dbName, sTblName);
- tmfree(childTblName);
- taos_free_result(res);
- taos_close(taos);
- exit(-1);
- }
+ char* pTblName = childTblName;
+ while((row = taos_fetch_row(res)) != NULL) {
+ int32_t* len = taos_fetch_lengths(res);
+
+ if (0 == strlen((char *)row[0])) {
+ errorPrint("%s() LN%d, No.%"PRId64" table return empty name\n",
+ __func__, __LINE__, count);
+ exit(-1);
+ }
+
+ tstrncpy(pTblName, (char *)row[0], len[0]+1);
+ //printf("==== sub table name: %s\n", pTblName);
+ count++;
+ if (count >= childTblCount - 1) {
+ char *tmp = realloc(childTblName,
+ (size_t)childTblCount*1.5*TSDB_TABLE_NAME_LEN+1);
+ if (tmp != NULL) {
+ childTblName = tmp;
+ childTblCount = (int)(childTblCount*1.5);
+ memset(childTblName + count*TSDB_TABLE_NAME_LEN, 0,
+ (size_t)((childTblCount-count)*TSDB_TABLE_NAME_LEN));
+ } else {
+ // exit, if allocate more memory failed
+ errorPrint("%s() LN%d, realloc fail for save child table name of %s.%s\n",
+ __func__, __LINE__, dbName, sTblName);
+ tmfree(childTblName);
+ taos_free_result(res);
+ taos_close(taos);
+ exit(-1);
+ }
+ }
+ pTblName = childTblName + count * TSDB_TABLE_NAME_LEN;
}
- pTblName = childTblName + count * TSDB_TABLE_NAME_LEN;
- }
- *childTblCountOfSuperTbl = count;
- *childTblNameOfSuperTbl = childTblName;
+ *childTblCountOfSuperTbl = count;
+ *childTblNameOfSuperTbl = childTblName;
- taos_free_result(res);
- return 0;
+ taos_free_result(res);
+ return 0;
}
static int getAllChildNameOfSuperTable(TAOS * taos, char* dbName,
@@ -2637,82 +2804,86 @@ static int getAllChildNameOfSuperTable(TAOS * taos, char* dbName,
static int getSuperTableFromServer(TAOS * taos, char* dbName,
SSuperTable* superTbls) {
- char command[BUFFER_SIZE] = "\0";
- TAOS_RES * res;
- TAOS_ROW row = NULL;
- int count = 0;
-
- //get schema use cmd: describe superTblName;
- snprintf(command, BUFFER_SIZE, "describe %s.%s", dbName, superTbls->sTblName);
- res = taos_query(taos, command);
- int32_t code = taos_errno(res);
- if (code != 0) {
- printf("failed to run command %s\n", command);
+ char command[BUFFER_SIZE] = "\0";
+ TAOS_RES * res;
+ TAOS_ROW row = NULL;
+ int count = 0;
+
+ //get schema use cmd: describe superTblName;
+ snprintf(command, BUFFER_SIZE, "describe %s.%s", dbName, superTbls->sTblName);
+ res = taos_query(taos, command);
+ int32_t code = taos_errno(res);
+ if (code != 0) {
+ printf("failed to run command %s\n", command);
+ taos_free_result(res);
+ return -1;
+ }
+
+ int tagIndex = 0;
+ int columnIndex = 0;
+ TAOS_FIELD *fields = taos_fetch_fields(res);
+ while((row = taos_fetch_row(res)) != NULL) {
+ if (0 == count) {
+ count++;
+ continue;
+ }
+
+ if (strcmp((char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], "TAG") == 0) {
+ tstrncpy(superTbls->tags[tagIndex].field,
+ (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX],
+ fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes);
+ tstrncpy(superTbls->tags[tagIndex].dataType,
+ (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
+ min(DATATYPE_BUFF_LEN,
+ fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1);
+ superTbls->tags[tagIndex].dataLen =
+ *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
+ tstrncpy(superTbls->tags[tagIndex].note,
+ (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX],
+ min(NOTE_BUFF_LEN,
+ fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes) + 1);
+ tagIndex++;
+ } else {
+ tstrncpy(superTbls->columns[columnIndex].field,
+ (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX],
+ fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes);
+ tstrncpy(superTbls->columns[columnIndex].dataType,
+ (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
+ min(DATATYPE_BUFF_LEN,
+ fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1);
+ superTbls->columns[columnIndex].dataLen =
+ *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
+ tstrncpy(superTbls->columns[columnIndex].note,
+ (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX],
+ min(NOTE_BUFF_LEN,
+ fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes) + 1);
+ columnIndex++;
+ }
+ count++;
+ }
+
+ superTbls->columnCount = columnIndex;
+ superTbls->tagCount = tagIndex;
taos_free_result(res);
- return -1;
- }
-
- int tagIndex = 0;
- int columnIndex = 0;
- TAOS_FIELD *fields = taos_fetch_fields(res);
- while((row = taos_fetch_row(res)) != NULL) {
- if (0 == count) {
- count++;
- continue;
- }
-
- if (strcmp((char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], "TAG") == 0) {
- tstrncpy(superTbls->tags[tagIndex].field,
- (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX],
- fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes);
- tstrncpy(superTbls->tags[tagIndex].dataType,
- (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
- min(15, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes));
- superTbls->tags[tagIndex].dataLen =
- *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
- tstrncpy(superTbls->tags[tagIndex].note,
- (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX],
- fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes);
- tagIndex++;
- } else {
- tstrncpy(superTbls->columns[columnIndex].field,
- (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX],
- fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes);
- tstrncpy(superTbls->columns[columnIndex].dataType,
- (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
- min(15, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes));
- superTbls->columns[columnIndex].dataLen =
- *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
- tstrncpy(superTbls->columns[columnIndex].note,
- (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX],
- fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes);
- columnIndex++;
- }
- count++;
- }
-
- superTbls->columnCount = columnIndex;
- superTbls->tagCount = tagIndex;
- taos_free_result(res);
-
- calcRowLen(superTbls);
-/*
- if (TBL_ALREADY_EXISTS == superTbls->childTblExists) {
+ calcRowLen(superTbls);
+
+ /*
+ if (TBL_ALREADY_EXISTS == superTbls->childTblExists) {
//get all child table name use cmd: select tbname from superTblName;
int childTblCount = 10000;
superTbls->childTblName = (char*)calloc(1, childTblCount * TSDB_TABLE_NAME_LEN);
if (superTbls->childTblName == NULL) {
- errorPrint("%s() LN%d, alloc memory failed!\n", __func__, __LINE__);
- return -1;
+ errorPrint("%s() LN%d, alloc memory failed!\n", __func__, __LINE__);
+ return -1;
}
getAllChildNameOfSuperTable(taos, dbName,
- superTbls->sTblName,
- &superTbls->childTblName,
- &superTbls->childTblCount);
- }
- */
- return 0;
+ superTbls->sTblName,
+ &superTbls->childTblName,
+ &superTbls->childTblCount);
+ }
+ */
+ return 0;
}
static int createSuperTable(
@@ -2748,26 +2919,26 @@ static int createSuperTable(
lenOfOneRow += superTbl->columns[colIndex].dataLen + 3;
} else if (strcasecmp(dataType, "INT") == 0) {
if ((g_args.demo_mode) && (colIndex == 1)) {
- len += snprintf(cols + len, COL_BUFFER_LEN - len,
- ", VOLTAGE INT");
+ len += snprintf(cols + len, COL_BUFFER_LEN - len,
+ ", VOLTAGE INT");
} else {
len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "INT");
}
- lenOfOneRow += 11;
+ lenOfOneRow += INT_BUFF_LEN;
} else if (strcasecmp(dataType, "BIGINT") == 0) {
len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s",
colIndex, "BIGINT");
- lenOfOneRow += 21;
+ lenOfOneRow += BIGINT_BUFF_LEN;
} else if (strcasecmp(dataType, "SMALLINT") == 0) {
len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s",
colIndex, "SMALLINT");
- lenOfOneRow += 6;
+ lenOfOneRow += SMALLINT_BUFF_LEN;
} else if (strcasecmp(dataType, "TINYINT") == 0) {
len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "TINYINT");
- lenOfOneRow += 4;
+ lenOfOneRow += TINYINT_BUFF_LEN;
} else if (strcasecmp(dataType, "BOOL") == 0) {
len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "BOOL");
- lenOfOneRow += 6;
+ lenOfOneRow += BOOL_BUFF_LEN;
} else if (strcasecmp(dataType, "FLOAT") == 0) {
if (g_args.demo_mode) {
if (colIndex == 0) {
@@ -2779,15 +2950,15 @@ static int createSuperTable(
len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "FLOAT");
}
- lenOfOneRow += 22;
+ lenOfOneRow += FLOAT_BUFF_LEN;
} else if (strcasecmp(dataType, "DOUBLE") == 0) {
len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s",
colIndex, "DOUBLE");
- lenOfOneRow += 42;
+ lenOfOneRow += DOUBLE_BUFF_LEN;
} else if (strcasecmp(dataType, "TIMESTAMP") == 0) {
len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s",
colIndex, "TIMESTAMP");
- lenOfOneRow += 21;
+ lenOfOneRow += TIMESTAMP_BUFF_LEN;
} else {
taos_close(taos);
errorPrint("%s() LN%d, config error data type : %s\n",
@@ -2850,31 +3021,31 @@ static int createSuperTable(
len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len,
"T%d %s,", tagIndex, "INT");
}
- lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 11;
+ lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + INT_BUFF_LEN;
} else if (strcasecmp(dataType, "BIGINT") == 0) {
len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len,
"T%d %s,", tagIndex, "BIGINT");
- lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 21;
+ lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + BIGINT_BUFF_LEN;
} else if (strcasecmp(dataType, "SMALLINT") == 0) {
len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len,
"T%d %s,", tagIndex, "SMALLINT");
- lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 6;
+ lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + SMALLINT_BUFF_LEN;
} else if (strcasecmp(dataType, "TINYINT") == 0) {
len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len,
"T%d %s,", tagIndex, "TINYINT");
- lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 4;
+ lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + TINYINT_BUFF_LEN;
} else if (strcasecmp(dataType, "BOOL") == 0) {
len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len,
"T%d %s,", tagIndex, "BOOL");
- lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 6;
+ lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + BOOL_BUFF_LEN;
} else if (strcasecmp(dataType, "FLOAT") == 0) {
len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len,
"T%d %s,", tagIndex, "FLOAT");
- lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 22;
+ lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + FLOAT_BUFF_LEN;
} else if (strcasecmp(dataType, "DOUBLE") == 0) {
len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len,
"T%d %s,", tagIndex, "DOUBLE");
- lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 42;
+ lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + DOUBLE_BUFF_LEN;
} else {
taos_close(taos);
errorPrint("%s() LN%d, config error tag type : %s\n",
@@ -3221,7 +3392,7 @@ static void createChildTables() {
continue;
}
verbosePrint("%s() LN%d: %s\n", __func__, __LINE__,
- g_Dbs.db[i].superTbls[j].colsOfCreateChildTable);
+ g_Dbs.db[i].superTbls[j].colsOfCreateChildTable);
uint64_t startFrom = 0;
g_totalChildTables += g_Dbs.db[i].superTbls[j].childTblCount;
@@ -3269,298 +3440,301 @@ static void createChildTables() {
}
/*
- Read 10000 lines at most. If more than 10000 lines, continue to read after using
-*/
+ Read 10000 lines at most. If more than 10000 lines, continue to read after using
+ */
static int readTagFromCsvFileToMem(SSuperTable * superTblInfo) {
- size_t n = 0;
- ssize_t readLen = 0;
- char * line = NULL;
-
- FILE *fp = fopen(superTblInfo->tagsFile, "r");
- if (fp == NULL) {
- printf("Failed to open tags file: %s, reason:%s\n",
- superTblInfo->tagsFile, strerror(errno));
- return -1;
- }
-
- if (superTblInfo->tagDataBuf) {
- free(superTblInfo->tagDataBuf);
- superTblInfo->tagDataBuf = NULL;
- }
-
- int tagCount = 10000;
- int count = 0;
- char* tagDataBuf = calloc(1, superTblInfo->lenOfTagOfOneRow * tagCount);
- if (tagDataBuf == NULL) {
- printf("Failed to calloc, reason:%s\n", strerror(errno));
- fclose(fp);
- return -1;
- }
+ size_t n = 0;
+ ssize_t readLen = 0;
+ char * line = NULL;
- while((readLen = tgetline(&line, &n, fp)) != -1) {
- if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) {
- line[--readLen] = 0;
+ FILE *fp = fopen(superTblInfo->tagsFile, "r");
+ if (fp == NULL) {
+ printf("Failed to open tags file: %s, reason:%s\n",
+ superTblInfo->tagsFile, strerror(errno));
+ return -1;
}
- if (readLen == 0) {
- continue;
+ if (superTblInfo->tagDataBuf) {
+ free(superTblInfo->tagDataBuf);
+ superTblInfo->tagDataBuf = NULL;
}
- memcpy(tagDataBuf + count * superTblInfo->lenOfTagOfOneRow, line, readLen);
- count++;
-
- if (count >= tagCount - 1) {
- char *tmp = realloc(tagDataBuf,
- (size_t)tagCount*1.5*superTblInfo->lenOfTagOfOneRow);
- if (tmp != NULL) {
- tagDataBuf = tmp;
- tagCount = (int)(tagCount*1.5);
- memset(tagDataBuf + count*superTblInfo->lenOfTagOfOneRow,
- 0, (size_t)((tagCount-count)*superTblInfo->lenOfTagOfOneRow));
- } else {
- // exit, if allocate more memory failed
- printf("realloc fail for save tag val from %s\n", superTblInfo->tagsFile);
- tmfree(tagDataBuf);
- free(line);
+ int tagCount = 10000;
+ int count = 0;
+ char* tagDataBuf = calloc(1, superTblInfo->lenOfTagOfOneRow * tagCount);
+ if (tagDataBuf == NULL) {
+ printf("Failed to calloc, reason:%s\n", strerror(errno));
fclose(fp);
return -1;
- }
}
- }
- superTblInfo->tagDataBuf = tagDataBuf;
- superTblInfo->tagSampleCount = count;
+ while((readLen = tgetline(&line, &n, fp)) != -1) {
+ if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) {
+ line[--readLen] = 0;
+ }
+
+ if (readLen == 0) {
+ continue;
+ }
+
+ memcpy(tagDataBuf + count * superTblInfo->lenOfTagOfOneRow, line, readLen);
+ count++;
+
+ if (count >= tagCount - 1) {
+ char *tmp = realloc(tagDataBuf,
+ (size_t)tagCount*1.5*superTblInfo->lenOfTagOfOneRow);
+ if (tmp != NULL) {
+ tagDataBuf = tmp;
+ tagCount = (int)(tagCount*1.5);
+ memset(tagDataBuf + count*superTblInfo->lenOfTagOfOneRow,
+ 0, (size_t)((tagCount-count)*superTblInfo->lenOfTagOfOneRow));
+ } else {
+ // exit, if allocate more memory failed
+ printf("realloc fail for save tag val from %s\n", superTblInfo->tagsFile);
+ tmfree(tagDataBuf);
+ free(line);
+ fclose(fp);
+ return -1;
+ }
+ }
+ }
+
+ superTblInfo->tagDataBuf = tagDataBuf;
+ superTblInfo->tagSampleCount = count;
- free(line);
- fclose(fp);
- return 0;
+ free(line);
+ fclose(fp);
+ return 0;
}
/*
- Read 10000 lines at most. If more than 10000 lines, continue to read after using
-*/
+ Read 10000 lines at most. If more than 10000 lines, continue to read after using
+ */
static int readSampleFromCsvFileToMem(
SSuperTable* superTblInfo) {
- size_t n = 0;
- ssize_t readLen = 0;
- char * line = NULL;
- int getRows = 0;
-
- FILE* fp = fopen(superTblInfo->sampleFile, "r");
- if (fp == NULL) {
- errorPrint( "Failed to open sample file: %s, reason:%s\n",
- superTblInfo->sampleFile, strerror(errno));
- return -1;
- }
-
- assert(superTblInfo->sampleDataBuf);
- memset(superTblInfo->sampleDataBuf, 0,
- MAX_SAMPLES_ONCE_FROM_FILE * superTblInfo->lenOfOneRow);
- while(1) {
- readLen = tgetline(&line, &n, fp);
- if (-1 == readLen) {
- if(0 != fseek(fp, 0, SEEK_SET)) {
- errorPrint( "Failed to fseek file: %s, reason:%s\n",
+ size_t n = 0;
+ ssize_t readLen = 0;
+ char * line = NULL;
+ int getRows = 0;
+
+ FILE* fp = fopen(superTblInfo->sampleFile, "r");
+ if (fp == NULL) {
+ errorPrint( "Failed to open sample file: %s, reason:%s\n",
superTblInfo->sampleFile, strerror(errno));
- fclose(fp);
return -1;
- }
- continue;
}
- if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) {
- line[--readLen] = 0;
- }
+ assert(superTblInfo->sampleDataBuf);
+ memset(superTblInfo->sampleDataBuf, 0,
+ MAX_SAMPLES_ONCE_FROM_FILE * superTblInfo->lenOfOneRow);
+ while(1) {
+ readLen = tgetline(&line, &n, fp);
+ if (-1 == readLen) {
+ if(0 != fseek(fp, 0, SEEK_SET)) {
+ errorPrint( "Failed to fseek file: %s, reason:%s\n",
+ superTblInfo->sampleFile, strerror(errno));
+ fclose(fp);
+ return -1;
+ }
+ continue;
+ }
- if (readLen == 0) {
- continue;
- }
+ if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) {
+ line[--readLen] = 0;
+ }
- if (readLen > superTblInfo->lenOfOneRow) {
- printf("sample row len[%d] overflow define schema len[%"PRIu64"], so discard this row\n",
- (int32_t)readLen, superTblInfo->lenOfOneRow);
- continue;
- }
+ if (readLen == 0) {
+ continue;
+ }
- memcpy(superTblInfo->sampleDataBuf + getRows * superTblInfo->lenOfOneRow,
- line, readLen);
- getRows++;
+ if (readLen > superTblInfo->lenOfOneRow) {
+            printf("sample row len[%d] exceeds the defined schema len[%"PRIu64"], so this row is discarded\n",
+ (int32_t)readLen, superTblInfo->lenOfOneRow);
+ continue;
+ }
- if (getRows == MAX_SAMPLES_ONCE_FROM_FILE) {
- break;
+ memcpy(superTblInfo->sampleDataBuf + getRows * superTblInfo->lenOfOneRow,
+ line, readLen);
+ getRows++;
+
+ if (getRows == MAX_SAMPLES_ONCE_FROM_FILE) {
+ break;
+ }
}
- }
- fclose(fp);
- tmfree(line);
- return 0;
+ fclose(fp);
+ tmfree(line);
+ return 0;
}
static bool getColumnAndTagTypeFromInsertJsonFile(
cJSON* stbInfo, SSuperTable* superTbls) {
- bool ret = false;
-
- // columns
- cJSON *columns = cJSON_GetObjectItem(stbInfo, "columns");
- if (columns && columns->type != cJSON_Array) {
- printf("ERROR: failed to read json, columns not found\n");
- goto PARSE_OVER;
- } else if (NULL == columns) {
- superTbls->columnCount = 0;
- superTbls->tagCount = 0;
- return true;
- }
+ bool ret = false;
- int columnSize = cJSON_GetArraySize(columns);
- if ((columnSize + 1/* ts */) > TSDB_MAX_COLUMNS) {
- errorPrint("%s() LN%d, failed to read json, column size overflow, max column size is %d\n",
- __func__, __LINE__, TSDB_MAX_COLUMNS);
- goto PARSE_OVER;
- }
+ // columns
+ cJSON *columns = cJSON_GetObjectItem(stbInfo, "columns");
+ if (columns && columns->type != cJSON_Array) {
+        printf("ERROR: failed to read json, columns field is not an array\n");
+ goto PARSE_OVER;
+ } else if (NULL == columns) {
+ superTbls->columnCount = 0;
+ superTbls->tagCount = 0;
+ return true;
+ }
- int count = 1;
- int index = 0;
- StrColumn columnCase;
+ int columnSize = cJSON_GetArraySize(columns);
+ if ((columnSize + 1/* ts */) > TSDB_MAX_COLUMNS) {
+ errorPrint("%s() LN%d, failed to read json, column size overflow, max column size is %d\n",
+ __func__, __LINE__, TSDB_MAX_COLUMNS);
+ goto PARSE_OVER;
+ }
- //superTbls->columnCount = columnSize;
- for (int k = 0; k < columnSize; ++k) {
- cJSON* column = cJSON_GetArrayItem(columns, k);
- if (column == NULL) continue;
+ int count = 1;
+ int index = 0;
+ StrColumn columnCase;
- count = 1;
- cJSON* countObj = cJSON_GetObjectItem(column, "count");
- if (countObj && countObj->type == cJSON_Number) {
- count = countObj->valueint;
- } else if (countObj && countObj->type != cJSON_Number) {
- errorPrint("%s() LN%d, failed to read json, column count not found\n",
- __func__, __LINE__);
- goto PARSE_OVER;
- } else {
- count = 1;
- }
-
- // column info
- memset(&columnCase, 0, sizeof(StrColumn));
- cJSON *dataType = cJSON_GetObjectItem(column, "type");
- if (!dataType || dataType->type != cJSON_String
- || dataType->valuestring == NULL) {
- errorPrint("%s() LN%d: failed to read json, column type not found\n",
- __func__, __LINE__);
- goto PARSE_OVER;
- }
- //tstrncpy(superTbls->columns[k].dataType, dataType->valuestring, MAX_TB_NAME_SIZE);
- tstrncpy(columnCase.dataType, dataType->valuestring, strlen(dataType->valuestring) + 1);
-
- cJSON* dataLen = cJSON_GetObjectItem(column, "len");
- if (dataLen && dataLen->type == cJSON_Number) {
- columnCase.dataLen = dataLen->valueint;
- } else if (dataLen && dataLen->type != cJSON_Number) {
- debugPrint("%s() LN%d: failed to read json, column len not found\n",
- __func__, __LINE__);
- goto PARSE_OVER;
- } else {
- columnCase.dataLen = 8;
- }
-
- for (int n = 0; n < count; ++n) {
- tstrncpy(superTbls->columns[index].dataType,
- columnCase.dataType, strlen(columnCase.dataType) + 1);
- superTbls->columns[index].dataLen = columnCase.dataLen;
- index++;
- }
- }
-
- if ((index + 1 /* ts */) > MAX_NUM_COLUMNS) {
- errorPrint("%s() LN%d, failed to read json, column size overflow, allowed max column size is %d\n",
- __func__, __LINE__, MAX_NUM_COLUMNS);
- goto PARSE_OVER;
- }
-
- superTbls->columnCount = index;
-
- count = 1;
- index = 0;
- // tags
- cJSON *tags = cJSON_GetObjectItem(stbInfo, "tags");
- if (!tags || tags->type != cJSON_Array) {
- errorPrint("%s() LN%d, failed to read json, tags not found\n",
- __func__, __LINE__);
- goto PARSE_OVER;
- }
-
- int tagSize = cJSON_GetArraySize(tags);
- if (tagSize > TSDB_MAX_TAGS) {
- errorPrint("%s() LN%d, failed to read json, tags size overflow, max tag size is %d\n",
- __func__, __LINE__, TSDB_MAX_TAGS);
- goto PARSE_OVER;
- }
-
- //superTbls->tagCount = tagSize;
- for (int k = 0; k < tagSize; ++k) {
- cJSON* tag = cJSON_GetArrayItem(tags, k);
- if (tag == NULL) continue;
+ //superTbls->columnCount = columnSize;
+ for (int k = 0; k < columnSize; ++k) {
+ cJSON* column = cJSON_GetArrayItem(columns, k);
+ if (column == NULL) continue;
- count = 1;
- cJSON* countObj = cJSON_GetObjectItem(tag, "count");
- if (countObj && countObj->type == cJSON_Number) {
- count = countObj->valueint;
- } else if (countObj && countObj->type != cJSON_Number) {
- printf("ERROR: failed to read json, column count not found\n");
- goto PARSE_OVER;
- } else {
- count = 1;
- }
-
- // column info
- memset(&columnCase, 0, sizeof(StrColumn));
- cJSON *dataType = cJSON_GetObjectItem(tag, "type");
- if (!dataType || dataType->type != cJSON_String
- || dataType->valuestring == NULL) {
- errorPrint("%s() LN%d, failed to read json, tag type not found\n",
- __func__, __LINE__);
- goto PARSE_OVER;
- }
- tstrncpy(columnCase.dataType, dataType->valuestring, strlen(dataType->valuestring) + 1);
-
- cJSON* dataLen = cJSON_GetObjectItem(tag, "len");
- if (dataLen && dataLen->type == cJSON_Number) {
- columnCase.dataLen = dataLen->valueint;
- } else if (dataLen && dataLen->type != cJSON_Number) {
- errorPrint("%s() LN%d, failed to read json, column len not found\n",
- __func__, __LINE__);
- goto PARSE_OVER;
- } else {
- columnCase.dataLen = 0;
- }
+ count = 1;
+ cJSON* countObj = cJSON_GetObjectItem(column, "count");
+ if (countObj && countObj->type == cJSON_Number) {
+ count = countObj->valueint;
+ } else if (countObj && countObj->type != cJSON_Number) {
+ errorPrint("%s() LN%d, failed to read json, column count not found\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ } else {
+ count = 1;
+ }
- for (int n = 0; n < count; ++n) {
- tstrncpy(superTbls->tags[index].dataType, columnCase.dataType,
- strlen(columnCase.dataType) + 1);
- superTbls->tags[index].dataLen = columnCase.dataLen;
- index++;
- }
- }
+ // column info
+ memset(&columnCase, 0, sizeof(StrColumn));
+ cJSON *dataType = cJSON_GetObjectItem(column, "type");
+ if (!dataType || dataType->type != cJSON_String
+ || dataType->valuestring == NULL) {
+ errorPrint("%s() LN%d: failed to read json, column type not found\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ }
+ //tstrncpy(superTbls->columns[k].dataType, dataType->valuestring, DATATYPE_BUFF_LEN);
+ tstrncpy(columnCase.dataType, dataType->valuestring,
+ min(DATATYPE_BUFF_LEN, strlen(dataType->valuestring) + 1));
- if (index > TSDB_MAX_TAGS) {
- errorPrint("%s() LN%d, failed to read json, tags size overflow, allowed max tag count is %d\n",
- __func__, __LINE__, TSDB_MAX_TAGS);
- goto PARSE_OVER;
- }
+ cJSON* dataLen = cJSON_GetObjectItem(column, "len");
+ if (dataLen && dataLen->type == cJSON_Number) {
+ columnCase.dataLen = dataLen->valueint;
+ } else if (dataLen && dataLen->type != cJSON_Number) {
+ debugPrint("%s() LN%d: failed to read json, column len not found\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ } else {
+ columnCase.dataLen = SMALL_BUFF_LEN;
+ }
- superTbls->tagCount = index;
+ for (int n = 0; n < count; ++n) {
+ tstrncpy(superTbls->columns[index].dataType,
+ columnCase.dataType,
+ min(DATATYPE_BUFF_LEN, strlen(columnCase.dataType) + 1));
+ superTbls->columns[index].dataLen = columnCase.dataLen;
+ index++;
+ }
+ }
- if ((superTbls->columnCount + superTbls->tagCount + 1 /* ts */) > TSDB_MAX_COLUMNS) {
- errorPrint("%s() LN%d, columns + tags is more than allowed max columns count: %d\n",
- __func__, __LINE__, TSDB_MAX_COLUMNS);
- goto PARSE_OVER;
- }
- ret = true;
+ if ((index + 1 /* ts */) > MAX_NUM_COLUMNS) {
+ errorPrint("%s() LN%d, failed to read json, column size overflow, allowed max column size is %d\n",
+ __func__, __LINE__, MAX_NUM_COLUMNS);
+ goto PARSE_OVER;
+ }
-PARSE_OVER:
- return ret;
-}
+ superTbls->columnCount = index;
-static bool getMetaFromInsertJsonFile(cJSON* root) {
+ count = 1;
+ index = 0;
+ // tags
+ cJSON *tags = cJSON_GetObjectItem(stbInfo, "tags");
+ if (!tags || tags->type != cJSON_Array) {
+ errorPrint("%s() LN%d, failed to read json, tags not found\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ }
+
+ int tagSize = cJSON_GetArraySize(tags);
+ if (tagSize > TSDB_MAX_TAGS) {
+ errorPrint("%s() LN%d, failed to read json, tags size overflow, max tag size is %d\n",
+ __func__, __LINE__, TSDB_MAX_TAGS);
+ goto PARSE_OVER;
+ }
+
+ //superTbls->tagCount = tagSize;
+ for (int k = 0; k < tagSize; ++k) {
+ cJSON* tag = cJSON_GetArrayItem(tags, k);
+ if (tag == NULL) continue;
+
+ count = 1;
+ cJSON* countObj = cJSON_GetObjectItem(tag, "count");
+ if (countObj && countObj->type == cJSON_Number) {
+ count = countObj->valueint;
+ } else if (countObj && countObj->type != cJSON_Number) {
+                printf("ERROR: failed to read json, tag count not found\n");
+ goto PARSE_OVER;
+ } else {
+ count = 1;
+ }
+
+ // column info
+ memset(&columnCase, 0, sizeof(StrColumn));
+ cJSON *dataType = cJSON_GetObjectItem(tag, "type");
+ if (!dataType || dataType->type != cJSON_String
+ || dataType->valuestring == NULL) {
+ errorPrint("%s() LN%d, failed to read json, tag type not found\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ }
+ tstrncpy(columnCase.dataType, dataType->valuestring,
+ min(DATATYPE_BUFF_LEN, strlen(dataType->valuestring) + 1));
+
+ cJSON* dataLen = cJSON_GetObjectItem(tag, "len");
+ if (dataLen && dataLen->type == cJSON_Number) {
+ columnCase.dataLen = dataLen->valueint;
+ } else if (dataLen && dataLen->type != cJSON_Number) {
+                errorPrint("%s() LN%d, failed to read json, tag len not found\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ } else {
+ columnCase.dataLen = 0;
+ }
+
+ for (int n = 0; n < count; ++n) {
+ tstrncpy(superTbls->tags[index].dataType, columnCase.dataType,
+ min(DATATYPE_BUFF_LEN, strlen(columnCase.dataType) + 1));
+ superTbls->tags[index].dataLen = columnCase.dataLen;
+ index++;
+ }
+ }
+
+ if (index > TSDB_MAX_TAGS) {
+ errorPrint("%s() LN%d, failed to read json, tags size overflow, allowed max tag count is %d\n",
+ __func__, __LINE__, TSDB_MAX_TAGS);
+ goto PARSE_OVER;
+ }
+
+ superTbls->tagCount = index;
+
+ if ((superTbls->columnCount + superTbls->tagCount + 1 /* ts */) > TSDB_MAX_COLUMNS) {
+ errorPrint("%s() LN%d, columns + tags is more than allowed max columns count: %d\n",
+ __func__, __LINE__, TSDB_MAX_COLUMNS);
+ goto PARSE_OVER;
+ }
+ ret = true;
+
+PARSE_OVER:
+ return ret;
+}
+
+static bool getMetaFromInsertJsonFile(cJSON* root) {
bool ret = false;
cJSON* cfgdir = cJSON_GetObjectItem(root, "cfgdir");
@@ -3779,9 +3953,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
if (precision && precision->type == cJSON_String
&& precision->valuestring != NULL) {
tstrncpy(g_Dbs.db[i].dbCfg.precision, precision->valuestring,
- 8);
+ SMALL_BUFF_LEN);
} else if (!precision) {
- memset(g_Dbs.db[i].dbCfg.precision, 0, 8);
+ memset(g_Dbs.db[i].dbCfg.precision, 0, SMALL_BUFF_LEN);
} else {
printf("ERROR: failed to read json, precision not found\n");
goto PARSE_OVER;
@@ -3966,7 +4140,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
goto PARSE_OVER;
}
tstrncpy(g_Dbs.db[i].superTbls[j].childTblPrefix, prefix->valuestring,
- TSDB_TABLE_NAME_LEN - 20);
+ TBNAME_PREFIX_LEN);
cJSON *autoCreateTbl = cJSON_GetObjectItem(stbInfo, "auto_create_table");
if (autoCreateTbl
@@ -4034,9 +4208,11 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
if (dataSource && dataSource->type == cJSON_String
&& dataSource->valuestring != NULL) {
tstrncpy(g_Dbs.db[i].superTbls[j].dataSource,
- dataSource->valuestring, TSDB_DB_NAME_LEN);
+ dataSource->valuestring,
+ min(SMALL_BUFF_LEN, strlen(dataSource->valuestring) + 1));
} else if (!dataSource) {
- tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, "rand", TSDB_DB_NAME_LEN);
+ tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, "rand",
+ min(SMALL_BUFF_LEN, strlen("rand") + 1));
} else {
errorPrint("%s() LN%d, failed to read json, data_source not found\n",
__func__, __LINE__);
@@ -4107,7 +4283,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
if (timestampStep && timestampStep->type == cJSON_Number) {
g_Dbs.db[i].superTbls[j].timeStampStep = timestampStep->valueint;
} else if (!timestampStep) {
- g_Dbs.db[i].superTbls[j].timeStampStep = DEFAULT_TIMESTAMP_STEP;
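+        // when the JSON omits timestamp_step, fall back to the global g_args.timestamp_step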
+ g_Dbs.db[i].superTbls[j].timeStampStep = g_args.timestamp_step;
} else {
printf("ERROR: failed to read json, timestamp_step not found\n");
goto PARSE_OVER;
@@ -4117,9 +4293,12 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
if (sampleFormat && sampleFormat->type
== cJSON_String && sampleFormat->valuestring != NULL) {
tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat,
- sampleFormat->valuestring, TSDB_DB_NAME_LEN);
+ sampleFormat->valuestring,
+ min(SMALL_BUFF_LEN,
+ strlen(sampleFormat->valuestring) + 1));
} else if (!sampleFormat) {
- tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat, "csv", TSDB_DB_NAME_LEN);
+ tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat, "csv",
+ SMALL_BUFF_LEN);
} else {
printf("ERROR: failed to read json, sample_format not found\n");
goto PARSE_OVER;
@@ -4129,9 +4308,12 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
if (sampleFile && sampleFile->type == cJSON_String
&& sampleFile->valuestring != NULL) {
tstrncpy(g_Dbs.db[i].superTbls[j].sampleFile,
- sampleFile->valuestring, MAX_FILE_NAME_LEN);
+ sampleFile->valuestring,
+ min(MAX_FILE_NAME_LEN,
+ strlen(sampleFile->valuestring) + 1));
} else if (!sampleFile) {
- memset(g_Dbs.db[i].superTbls[j].sampleFile, 0, MAX_FILE_NAME_LEN);
+ memset(g_Dbs.db[i].superTbls[j].sampleFile, 0,
+ MAX_FILE_NAME_LEN);
} else {
printf("ERROR: failed to read json, sample_file not found\n");
goto PARSE_OVER;
@@ -4371,10 +4553,14 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
}
cJSON* queryMode = cJSON_GetObjectItem(root, "query_mode");
- if (queryMode && queryMode->type == cJSON_String && queryMode->valuestring != NULL) {
- tstrncpy(g_queryInfo.queryMode, queryMode->valuestring, MAX_TB_NAME_SIZE);
+ if (queryMode
+ && queryMode->type == cJSON_String
+ && queryMode->valuestring != NULL) {
+ tstrncpy(g_queryInfo.queryMode, queryMode->valuestring,
+ min(SMALL_BUFF_LEN, strlen(queryMode->valuestring) + 1));
} else if (!queryMode) {
- tstrncpy(g_queryInfo.queryMode, "taosc", MAX_TB_NAME_SIZE);
+ tstrncpy(g_queryInfo.queryMode, "taosc",
+ min(SMALL_BUFF_LEN, strlen("taosc") + 1));
} else {
printf("ERROR: failed to read json, query_mode not found\n");
goto PARSE_OVER;
@@ -4516,7 +4702,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
goto PARSE_OVER;
}
tstrncpy(g_queryInfo.specifiedQueryInfo.sql[j],
- sqlStr->valuestring, MAX_QUERY_SQL_LENGTH);
+ sqlStr->valuestring, BUFFER_SIZE);
// default value is -1, which mean infinite loop
g_queryInfo.specifiedQueryInfo.endAfterConsume[j] = -1;
@@ -4738,7 +4924,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
goto PARSE_OVER;
}
tstrncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring,
- MAX_QUERY_SQL_LENGTH);
+ BUFFER_SIZE);
cJSON *result = cJSON_GetObjectItem(sql, "result");
if (result != NULL && result->type == cJSON_String
@@ -4862,11 +5048,22 @@ static void postFreeResource() {
}
}
}
+
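+    // release the pre-generated random data buffers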
+ tmfree(g_randbool_buff);
+ tmfree(g_randint_buff);
+ tmfree(g_rand_voltage_buff);
+ tmfree(g_randbigint_buff);
+ tmfree(g_randsmallint_buff);
+ tmfree(g_randtinyint_buff);
+ tmfree(g_randfloat_buff);
+ tmfree(g_rand_current_buff);
+ tmfree(g_rand_phase_buff);
+ tmfree(g_randdouble_buff);
}
static int getRowDataFromSample(
char* dataBuf, int64_t maxLen, int64_t timestamp,
- SSuperTable* superTblInfo, int64_t* sampleUsePos)
+ SSuperTable* superTblInfo, int64_t* sampleUsePos)
{
if ((*sampleUsePos) == MAX_SAMPLES_ONCE_FROM_FILE) {
/* int ret = readSampleFromCsvFileToMem(superTblInfo);
@@ -4924,56 +5121,61 @@ static int64_t generateStbRowData(
rand_string(buf, stbInfo->columns[i].dataLen);
dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "\'%s\',", buf);
tmfree(buf);
- } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "INT", strlen("INT"))) {
- if ((g_args.demo_mode) && (i == 1)) {
- dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "%d,", demo_voltage_int());
- } else {
- dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "%d,", rand_int());
- }
- } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "BIGINT", strlen("BIGINT"))) {
- dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "%"PRId64",", rand_bigint());
- } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "FLOAT", strlen("FLOAT"))) {
- if (g_args.demo_mode) {
- if (i == 0) {
- dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "%f,", demo_current_float());
+ } else {
+ char *tmp;
+
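+        // pick a pre-formatted random value string for this column type; it is appended below with a trailing comma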
+ if (0 == strncasecmp(stbInfo->columns[i].dataType,
+ "INT", strlen("INT"))) {
+ if ((g_args.demo_mode) && (i == 1)) {
+ tmp = demo_voltage_int_str();
} else {
- dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "%f,", demo_phase_float());
+ tmp = rand_int_str();
}
- } else {
- dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "%f,", rand_float());
+ tstrncpy(pstr + dataLen, tmp, INT_BUFF_LEN);
+ } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
+ "BIGINT", strlen("BIGINT"))) {
+ tmp = rand_bigint_str();
+ tstrncpy(pstr + dataLen, tmp, BIGINT_BUFF_LEN);
+ } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
+ "FLOAT", strlen("FLOAT"))) {
+ if (g_args.demo_mode) {
+ if (i == 0) {
+ tmp = demo_current_float_str();
+ } else {
+ tmp = demo_phase_float_str();
+ }
+ } else {
+ tmp = rand_float_str();
+ }
+ tstrncpy(pstr + dataLen, tmp, FLOAT_BUFF_LEN);
+ } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
+ "DOUBLE", strlen("DOUBLE"))) {
+ tmp = rand_double_str();
+ tstrncpy(pstr + dataLen, tmp, DOUBLE_BUFF_LEN);
+ } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
+ "SMALLINT", strlen("SMALLINT"))) {
+ tmp = rand_smallint_str();
+ tstrncpy(pstr + dataLen, tmp, SMALLINT_BUFF_LEN);
+ } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
+ "TINYINT", strlen("TINYINT"))) {
+ tmp = rand_tinyint_str();
+ tstrncpy(pstr + dataLen, tmp, TINYINT_BUFF_LEN);
+ } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
+ "BOOL", strlen("BOOL"))) {
+ tmp = rand_bool_str();
+ tstrncpy(pstr + dataLen, tmp, BOOL_BUFF_LEN);
+ } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
+ "TIMESTAMP", strlen("TIMESTAMP"))) {
+ tmp = rand_int_str();
+ tstrncpy(pstr + dataLen, tmp, INT_BUFF_LEN);
+ } else {
+                errorPrint("Unsupported data type: %s\n", stbInfo->columns[i].dataType);
+ return -1;
}
- } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "DOUBLE", strlen("DOUBLE"))) {
- dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "%f,", rand_double());
- } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "SMALLINT", strlen("SMALLINT"))) {
- dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "%d,", rand_smallint());
- } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "TINYINT", strlen("TINYINT"))) {
- dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "%d,", rand_tinyint());
- } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "BOOL", strlen("BOOL"))) {
- dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "%d,", rand_bool());
- } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "TIMESTAMP", strlen("TIMESTAMP"))) {
- dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "%"PRId64",", rand_bigint());
- } else {
- errorPrint( "Not support data type: %s\n", stbInfo->columns[i].dataType);
- return -1;
+
+ dataLen += strlen(tmp);
+ tstrncpy(pstr + dataLen, ",", 2);
+ dataLen += 1;
}
}
@@ -5118,10 +5320,13 @@ static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k)
#if STMT_IFACE_ENABLED == 1
case STMT_IFACE:
- debugPrint("%s() LN%d, stmt=%p", __func__, __LINE__, pThreadInfo->stmt);
+ debugPrint("%s() LN%d, stmt=%p",
+ __func__, __LINE__, pThreadInfo->stmt);
if (0 != taos_stmt_execute(pThreadInfo->stmt)) {
- errorPrint("%s() LN%d, failied to execute insert statement\n",
- __func__, __LINE__);
+            errorPrint("%s() LN%d, failed to execute insert statement. reason: %s\n",
+ __func__, __LINE__, taos_stmt_errstr(pThreadInfo->stmt));
+
+ fprintf(stderr, "\n\033[31m === Please reduce batch number if WAL size exceeds limit. ===\033[0m\n\n");
exit(-1);
}
affectedRows = k;
@@ -5189,13 +5394,13 @@ static int32_t generateDataTailWithoutStb(
if (g_args.disorderRatio) {
retLen = generateData(data, data_type,
startTime + getTSRandTail(
- (int64_t) DEFAULT_TIMESTAMP_STEP, k,
+ g_args.timestamp_step, k,
g_args.disorderRatio,
g_args.disorderRange),
lenOfBinary);
} else {
retLen = generateData(data, data_type,
- startTime + (int64_t) (DEFAULT_TIMESTAMP_STEP* k),
+ startTime + g_args.timestamp_step * k,
lenOfBinary);
}
@@ -5458,39 +5663,39 @@ static int64_t generateInterlaceDataWithoutStb(
int64_t startTime,
uint64_t *pRemainderBufLen)
{
- assert(buffer);
- char *pstr = buffer;
+ assert(buffer);
+ char *pstr = buffer;
- int headLen = generateSQLHeadWithoutStb(
- tableName, dbName,
+ int headLen = generateSQLHeadWithoutStb(
+ tableName, dbName,
pstr, *pRemainderBufLen);
- if (headLen <= 0) {
- return 0;
- }
+ if (headLen <= 0) {
+ return 0;
+ }
- pstr += headLen;
- *pRemainderBufLen -= headLen;
+ pstr += headLen;
+ *pRemainderBufLen -= headLen;
- int64_t dataLen = 0;
+ int64_t dataLen = 0;
- int32_t k = generateDataTailWithoutStb(
+ int32_t k = generateDataTailWithoutStb(
batch, pstr, *pRemainderBufLen, insertRows, 0,
startTime,
&dataLen);
- if (k == batch) {
- pstr += dataLen;
- *pRemainderBufLen -= dataLen;
- } else {
- debugPrint("%s() LN%d, generated data tail: %d, not equal batch per table: %u\n",
- __func__, __LINE__, k, batch);
- pstr -= headLen;
- pstr[0] = '\0';
- k = 0;
- }
-
- return k;
+ if (k == batch) {
+ pstr += dataLen;
+ *pRemainderBufLen -= dataLen;
+ } else {
+ debugPrint("%s() LN%d, generated data tail: %d, not equal batch per table: %u\n",
+ __func__, __LINE__, k, batch);
+ pstr -= headLen;
+ pstr[0] = '\0';
+ k = 0;
+ }
+
+ return k;
}
#if STMT_IFACE_ENABLED == 1
@@ -5684,7 +5889,7 @@ static int32_t prepareStmtWithoutStb(
int ret = taos_stmt_set_tbname(stmt, tableName);
if (ret != 0) {
errorPrint("failed to execute taos_stmt_set_tbname(%s). return 0x%x. reason: %s\n",
- tableName, ret, taos_errstr(NULL));
+ tableName, ret, taos_stmt_errstr(stmt));
return ret;
}
@@ -5713,11 +5918,11 @@ static int32_t prepareStmtWithoutStb(
if (g_args.disorderRatio) {
*bind_ts = startTime + getTSRandTail(
- (int64_t)DEFAULT_TIMESTAMP_STEP, k,
+ g_args.timestamp_step, k,
g_args.disorderRatio,
g_args.disorderRange);
} else {
- *bind_ts = startTime + (int64_t)(DEFAULT_TIMESTAMP_STEP * k);
+ *bind_ts = startTime + g_args.timestamp_step * k;
}
bind->buffer_length = sizeof(int64_t);
bind->buffer = bind_ts;
@@ -5737,9 +5942,17 @@ static int32_t prepareStmtWithoutStb(
return -1;
}
}
- taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray);
+ if (0 != taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray)) {
+ errorPrint("%s() LN%d, stmt_bind_param() failed! reason: %s\n",
+ __func__, __LINE__, taos_stmt_errstr(stmt));
+ break;
+ }
// if msg > 3MB, break
- taos_stmt_add_batch(stmt);
+ if (0 != taos_stmt_add_batch(stmt)) {
+ errorPrint("%s() LN%d, stmt_add_batch() failed! reason: %s\n",
+ __func__, __LINE__, taos_stmt_errstr(stmt));
+ break;
+ }
k++;
recordFrom ++;
@@ -5771,6 +5984,8 @@ static int32_t prepareStbStmtBind(
TAOS_BIND *bind;
if (isColumn) {
+ int cursor = 0;
+
for (int i = 0; i < stbInfo->columnCount + 1; i ++) {
bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * i));
@@ -5794,7 +6009,6 @@ static int32_t prepareStbStmtBind(
ptr += bind->buffer_length;
} else {
- int cursor = 0;
if (sourceRand) {
if ( -1 == prepareStmtBindArrayByType(
@@ -5851,6 +6065,7 @@ static int32_t prepareStbStmtBind(
}
+ free(bindBuffer);
return 0;
}
@@ -5904,22 +6119,28 @@ static int32_t prepareStbStmt(
if (-1 == prepareStbStmtBind(
tagsArray, stbInfo, tagRand, -1, -1, false /* is tag */)) {
- free(tagsArray);
+ tmfree(tagsValBuf);
+ tmfree(tagsArray);
return -1;
}
ret = taos_stmt_set_tbname_tags(stmt, tableName, (TAOS_BIND *)tagsArray);
tmfree(tagsValBuf);
- tmfree((char *)tagsArray);
+ tmfree(tagsArray);
+
+ if (0 != ret) {
+ errorPrint("%s() LN%d, stmt_set_tbname_tags() failed! reason: %s\n",
+ __func__, __LINE__, taos_stmt_errstr(stmt));
+ return -1;
+ }
} else {
ret = taos_stmt_set_tbname(stmt, tableName);
- }
-
- if (ret != 0) {
- errorPrint("failed to execute taos_stmt_set_tbname(%s). return 0x%x. reason: %s\n",
- tableName, ret, taos_errstr(NULL));
- return ret;
+ if (0 != ret) {
+ errorPrint("%s() LN%d, stmt_set_tbname() failed! reason: %s\n",
+ __func__, __LINE__, taos_stmt_errstr(stmt));
+ return -1;
+ }
}
char *bindArray = calloc(1, sizeof(TAOS_BIND) * (stbInfo->columnCount + 1));
@@ -5937,9 +6158,21 @@ static int32_t prepareStbStmt(
free(bindArray);
return -1;
}
- taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray);
+ ret = taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray);
+ if (0 != ret) {
+ errorPrint("%s() LN%d, stmt_bind_param() failed! reason: %s\n",
+ __func__, __LINE__, taos_stmt_errstr(stmt));
+ free(bindArray);
+ return -1;
+ }
// if msg > 3MB, break
- taos_stmt_add_batch(stmt);
+ ret = taos_stmt_add_batch(stmt);
+ if (0 != ret) {
+ errorPrint("%s() LN%d, stmt_add_batch() failed! reason: %s\n",
+ __func__, __LINE__, taos_stmt_errstr(stmt));
+ free(bindArray);
+ return -1;
+ }
k++;
recordFrom ++;
@@ -6043,27 +6276,27 @@ static int32_t generateProgressiveDataWithoutStb(
uint64_t recordFrom, int64_t startTime, /*int64_t *pSamplePos, */
int64_t *pRemainderBufLen)
{
- assert(buffer != NULL);
- char *pstr = buffer;
+ assert(buffer != NULL);
+ char *pstr = buffer;
- memset(buffer, 0, *pRemainderBufLen);
+ memset(buffer, 0, *pRemainderBufLen);
- int64_t headLen = generateSQLHeadWithoutStb(
- tableName, pThreadInfo->db_name,
- buffer, *pRemainderBufLen);
+ int64_t headLen = generateSQLHeadWithoutStb(
+ tableName, pThreadInfo->db_name,
+ buffer, *pRemainderBufLen);
- if (headLen <= 0) {
- return 0;
- }
- pstr += headLen;
- *pRemainderBufLen -= headLen;
+ if (headLen <= 0) {
+ return 0;
+ }
+ pstr += headLen;
+ *pRemainderBufLen -= headLen;
- int64_t dataLen;
+ int64_t dataLen;
- return generateDataTailWithoutStb(
- g_args.num_of_RPR, pstr, *pRemainderBufLen, insertRows, recordFrom,
- startTime,
- /*pSamplePos, */&dataLen);
+ return generateDataTailWithoutStb(
+ g_args.num_of_RPR, pstr, *pRemainderBufLen, insertRows, recordFrom,
+ startTime,
+ /*pSamplePos, */&dataLen);
}
static void printStatPerThread(threadInfo *pThreadInfo)
@@ -6106,7 +6339,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
insertRows = g_args.num_of_DPT;
interlaceRows = g_args.interlace_rows;
maxSqlLen = g_args.max_sql_len;
- nTimeStampStep = DEFAULT_TIMESTAMP_STEP;
+ nTimeStampStep = g_args.timestamp_step;
insert_interval = g_args.insert_interval;
}
@@ -6366,7 +6599,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
uint64_t maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len;
int64_t timeStampStep =
- superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP;
+ superTblInfo?superTblInfo->timeStampStep:g_args.timestamp_step;
int64_t insertRows =
(superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT;
verbosePrint("%s() LN%d insertRows=%"PRId64"\n",
@@ -6508,9 +6741,10 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
} // num_of_DPT
if ((g_args.verbose_print) &&
- (tableSeq == pThreadInfo->ntables - 1) && (superTblInfo) &&
- (0 == strncasecmp(
- superTblInfo->dataSource, "sample", strlen("sample")))) {
+ (tableSeq == pThreadInfo->ntables - 1) && (superTblInfo)
+ && (0 == strncasecmp(
+ superTblInfo->dataSource,
+ "sample", strlen("sample")))) {
verbosePrint("%s() LN%d samplePos=%"PRId64"\n",
__func__, __LINE__, pThreadInfo->samplePos);
}
@@ -6552,110 +6786,110 @@ static void* syncWrite(void *sarg) {
}
static void callBack(void *param, TAOS_RES *res, int code) {
- threadInfo* pThreadInfo = (threadInfo*)param;
- SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
-
- int insert_interval =
- superTblInfo?superTblInfo->insertInterval:g_args.insert_interval;
- if (insert_interval) {
- pThreadInfo->et = taosGetTimestampMs();
- if ((pThreadInfo->et - pThreadInfo->st) < insert_interval) {
- taosMsleep(insert_interval - (pThreadInfo->et - pThreadInfo->st)); // ms
- }
- }
-
- char *buffer = calloc(1, pThreadInfo->superTblInfo->maxSqlLen);
- char data[MAX_DATA_SIZE];
- char *pstr = buffer;
- pstr += sprintf(pstr, "insert into %s.%s%"PRId64" values",
- pThreadInfo->db_name, pThreadInfo->tb_prefix,
- pThreadInfo->start_table_from);
-// if (pThreadInfo->counter >= pThreadInfo->superTblInfo->insertRows) {
- if (pThreadInfo->counter >= g_args.num_of_RPR) {
- pThreadInfo->start_table_from++;
- pThreadInfo->counter = 0;
- }
- if (pThreadInfo->start_table_from > pThreadInfo->end_table_to) {
- tsem_post(&pThreadInfo->lock_sem);
- free(buffer);
- taos_free_result(res);
- return;
- }
-
- for (int i = 0; i < g_args.num_of_RPR; i++) {
- int rand_num = taosRandom() % 100;
- if (0 != pThreadInfo->superTblInfo->disorderRatio
- && rand_num < pThreadInfo->superTblInfo->disorderRatio) {
- int64_t d = pThreadInfo->lastTs
- - (taosRandom() % pThreadInfo->superTblInfo->disorderRange + 1);
- generateStbRowData(pThreadInfo->superTblInfo, data, d);
- } else {
- generateStbRowData(pThreadInfo->superTblInfo,
- data, pThreadInfo->lastTs += 1000);
+ threadInfo* pThreadInfo = (threadInfo*)param;
+ SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
+
+ int insert_interval =
+ superTblInfo?superTblInfo->insertInterval:g_args.insert_interval;
+ if (insert_interval) {
+ pThreadInfo->et = taosGetTimestampMs();
+ if ((pThreadInfo->et - pThreadInfo->st) < insert_interval) {
+ taosMsleep(insert_interval - (pThreadInfo->et - pThreadInfo->st)); // ms
+ }
}
- pstr += sprintf(pstr, "%s", data);
- pThreadInfo->counter++;
- if (pThreadInfo->counter >= pThreadInfo->superTblInfo->insertRows) {
- break;
+ char *buffer = calloc(1, pThreadInfo->superTblInfo->maxSqlLen);
+ char data[MAX_DATA_SIZE];
+ char *pstr = buffer;
+ pstr += sprintf(pstr, "insert into %s.%s%"PRId64" values",
+ pThreadInfo->db_name, pThreadInfo->tb_prefix,
+ pThreadInfo->start_table_from);
+ // if (pThreadInfo->counter >= pThreadInfo->superTblInfo->insertRows) {
+ if (pThreadInfo->counter >= g_args.num_of_RPR) {
+ pThreadInfo->start_table_from++;
+ pThreadInfo->counter = 0;
+ }
+ if (pThreadInfo->start_table_from > pThreadInfo->end_table_to) {
+ tsem_post(&pThreadInfo->lock_sem);
+ free(buffer);
+ taos_free_result(res);
+ return;
}
- }
- if (insert_interval) {
- pThreadInfo->st = taosGetTimestampMs();
- }
- taos_query_a(pThreadInfo->taos, buffer, callBack, pThreadInfo);
- free(buffer);
+ for (int i = 0; i < g_args.num_of_RPR; i++) {
+ int rand_num = taosRandom() % 100;
+ if (0 != pThreadInfo->superTblInfo->disorderRatio
+ && rand_num < pThreadInfo->superTblInfo->disorderRatio) {
+ int64_t d = pThreadInfo->lastTs
+ - (taosRandom() % pThreadInfo->superTblInfo->disorderRange + 1);
+ generateStbRowData(pThreadInfo->superTblInfo, data, d);
+ } else {
+ generateStbRowData(pThreadInfo->superTblInfo,
+ data, pThreadInfo->lastTs += 1000);
+ }
+ pstr += sprintf(pstr, "%s", data);
+ pThreadInfo->counter++;
- taos_free_result(res);
+ if (pThreadInfo->counter >= pThreadInfo->superTblInfo->insertRows) {
+ break;
+ }
+ }
+
+ if (insert_interval) {
+ pThreadInfo->st = taosGetTimestampMs();
+ }
+ taos_query_a(pThreadInfo->taos, buffer, callBack, pThreadInfo);
+ free(buffer);
+
+ taos_free_result(res);
}
static void *asyncWrite(void *sarg) {
- threadInfo *pThreadInfo = (threadInfo *)sarg;
- SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
+ threadInfo *pThreadInfo = (threadInfo *)sarg;
+ SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
- setThreadName("asyncWrite");
+ setThreadName("asyncWrite");
- pThreadInfo->st = 0;
- pThreadInfo->et = 0;
- pThreadInfo->lastTs = pThreadInfo->start_time;
+ pThreadInfo->st = 0;
+ pThreadInfo->et = 0;
+ pThreadInfo->lastTs = pThreadInfo->start_time;
- int insert_interval =
- superTblInfo?superTblInfo->insertInterval:g_args.insert_interval;
- if (insert_interval) {
- pThreadInfo->st = taosGetTimestampMs();
- }
- taos_query_a(pThreadInfo->taos, "show databases", callBack, pThreadInfo);
+ int insert_interval =
+ superTblInfo?superTblInfo->insertInterval:g_args.insert_interval;
+ if (insert_interval) {
+ pThreadInfo->st = taosGetTimestampMs();
+ }
+ taos_query_a(pThreadInfo->taos, "show databases", callBack, pThreadInfo);
- tsem_wait(&(pThreadInfo->lock_sem));
+ tsem_wait(&(pThreadInfo->lock_sem));
- return NULL;
+ return NULL;
}
static int convertHostToServAddr(char *host, uint16_t port, struct sockaddr_in *serv_addr)
{
- uint16_t rest_port = port + TSDB_PORT_HTTP;
- struct hostent *server = gethostbyname(host);
- if ((server == NULL) || (server->h_addr == NULL)) {
- errorPrint("%s", "ERROR, no such host");
- return -1;
- }
+ uint16_t rest_port = port + TSDB_PORT_HTTP;
+ struct hostent *server = gethostbyname(host);
+ if ((server == NULL) || (server->h_addr == NULL)) {
+ errorPrint("%s", "ERROR, no such host");
+ return -1;
+ }
- debugPrint("h_name: %s\nh_addr=%p\nh_addretype: %s\nh_length: %d\n",
+    debugPrint("h_name: %s\nh_addr=%p\nh_addrtype: %s\nh_length: %d\n",
server->h_name,
server->h_addr,
(server->h_addrtype == AF_INET)?"ipv4":"ipv6",
server->h_length);
- memset(serv_addr, 0, sizeof(struct sockaddr_in));
- serv_addr->sin_family = AF_INET;
- serv_addr->sin_port = htons(rest_port);
+ memset(serv_addr, 0, sizeof(struct sockaddr_in));
+ serv_addr->sin_family = AF_INET;
+ serv_addr->sin_port = htons(rest_port);
#ifdef WINDOWS
- serv_addr->sin_addr.s_addr = inet_addr(host);
+ serv_addr->sin_addr.s_addr = inet_addr(host);
#else
- memcpy(&(serv_addr->sin_addr.s_addr), server->h_addr, server->h_length);
+ memcpy(&(serv_addr->sin_addr.s_addr), server->h_addr, server->h_length);
#endif
- return 0;
+ return 0;
}
static void startMultiThreadInsertData(int threads, char* db_name,
@@ -6724,14 +6958,17 @@ static void startMultiThreadInsertData(int threads, char* db_name,
int64_t limit;
uint64_t offset;
- if ((NULL != g_args.sqlFile) && (superTblInfo->childTblExists == TBL_NO_EXISTS) &&
- ((superTblInfo->childTblOffset != 0) || (superTblInfo->childTblLimit >= 0))) {
+ if ((NULL != g_args.sqlFile)
+ && (superTblInfo->childTblExists == TBL_NO_EXISTS)
+ && ((superTblInfo->childTblOffset != 0)
+ || (superTblInfo->childTblLimit >= 0))) {
printf("WARNING: offset and limit will not be used since the child tables not exists!\n");
}
if (superTblInfo->childTblExists == TBL_ALREADY_EXISTS) {
if ((superTblInfo->childTblLimit < 0)
- || ((superTblInfo->childTblOffset + superTblInfo->childTblLimit)
+ || ((superTblInfo->childTblOffset
+ + superTblInfo->childTblLimit)
> (superTblInfo->childTblCount))) {
superTblInfo->childTblLimit =
superTblInfo->childTblCount - superTblInfo->childTblOffset;
@@ -6837,7 +7074,8 @@ static void startMultiThreadInsertData(int threads, char* db_name,
#if STMT_IFACE_ENABLED == 1
if ((g_args.iface == STMT_IFACE)
- || ((superTblInfo) && (superTblInfo->iface == STMT_IFACE))) {
+ || ((superTblInfo)
+ && (superTblInfo->iface == STMT_IFACE))) {
int columnCount;
if (superTblInfo) {
@@ -6865,7 +7103,8 @@ static void startMultiThreadInsertData(int threads, char* db_name,
== superTblInfo->autoCreateTable)) {
pstr += sprintf(pstr, "INSERT INTO ? USING %s TAGS(?",
superTblInfo->sTblName);
- for (int tag = 0; tag < (superTblInfo->tagCount - 1); tag ++ ) {
+ for (int tag = 0; tag < (superTblInfo->tagCount - 1);
+ tag ++ ) {
pstr += sprintf(pstr, ",?");
}
pstr += sprintf(pstr, ") VALUES(?");
@@ -6882,7 +7121,7 @@ static void startMultiThreadInsertData(int threads, char* db_name,
int ret = taos_stmt_prepare(pThreadInfo->stmt, buffer, 0);
if (ret != 0){
errorPrint("failed to execute taos_stmt_prepare. return 0x%x. reason: %s\n",
- ret, taos_errstr(NULL));
+ ret, taos_stmt_errstr(pThreadInfo->stmt));
free(pids);
free(infos);
exit(-1);
@@ -7014,157 +7253,157 @@ static void startMultiThreadInsertData(int threads, char* db_name,
static void *readTable(void *sarg) {
#if 1
- threadInfo *pThreadInfo = (threadInfo *)sarg;
- TAOS *taos = pThreadInfo->taos;
- setThreadName("readTable");
- char command[BUFFER_SIZE] = "\0";
- uint64_t sTime = pThreadInfo->start_time;
- char *tb_prefix = pThreadInfo->tb_prefix;
- FILE *fp = fopen(pThreadInfo->filePath, "a");
- if (NULL == fp) {
- errorPrint( "fopen %s fail, reason:%s.\n", pThreadInfo->filePath, strerror(errno));
- return NULL;
- }
-
- int64_t num_of_DPT;
-/* if (pThreadInfo->superTblInfo) {
- num_of_DPT = pThreadInfo->superTblInfo->insertRows; // nrecords_per_table;
- } else {
- */
- num_of_DPT = g_args.num_of_DPT;
-// }
-
- int64_t num_of_tables = pThreadInfo->ntables; // rinfo->end_table_to - rinfo->start_table_from + 1;
- int64_t totalData = num_of_DPT * num_of_tables;
- bool do_aggreFunc = g_Dbs.do_aggreFunc;
-
- int n = do_aggreFunc ? (sizeof(aggreFunc) / sizeof(aggreFunc[0])) : 2;
- if (!do_aggreFunc) {
- printf("\nThe first field is either Binary or Bool. Aggregation functions are not supported.\n");
- }
- printf("%"PRId64" records:\n", totalData);
- fprintf(fp, "| QFunctions | QRecords | QSpeed(R/s) | QLatency(ms) |\n");
-
- for (int j = 0; j < n; j++) {
- double totalT = 0;
- uint64_t count = 0;
- for (int64_t i = 0; i < num_of_tables; i++) {
- sprintf(command, "select %s from %s%"PRId64" where ts>= %" PRIu64,
- aggreFunc[j], tb_prefix, i, sTime);
-
- double t = taosGetTimestampMs();
- TAOS_RES *pSql = taos_query(taos, command);
- int32_t code = taos_errno(pSql);
-
- if (code != 0) {
- errorPrint( "Failed to query:%s\n", taos_errstr(pSql));
- taos_free_result(pSql);
- taos_close(taos);
- fclose(fp);
+ threadInfo *pThreadInfo = (threadInfo *)sarg;
+ TAOS *taos = pThreadInfo->taos;
+ setThreadName("readTable");
+ char command[BUFFER_SIZE] = "\0";
+ uint64_t sTime = pThreadInfo->start_time;
+ char *tb_prefix = pThreadInfo->tb_prefix;
+ FILE *fp = fopen(pThreadInfo->filePath, "a");
+ if (NULL == fp) {
+ errorPrint( "fopen %s fail, reason:%s.\n", pThreadInfo->filePath, strerror(errno));
return NULL;
- }
+ }
- while(taos_fetch_row(pSql) != NULL) {
- count++;
- }
+ int64_t num_of_DPT;
+ /* if (pThreadInfo->superTblInfo) {
+ num_of_DPT = pThreadInfo->superTblInfo->insertRows; // nrecords_per_table;
+ } else {
+ */
+ num_of_DPT = g_args.num_of_DPT;
+ // }
+
+ int64_t num_of_tables = pThreadInfo->ntables; // rinfo->end_table_to - rinfo->start_table_from + 1;
+ int64_t totalData = num_of_DPT * num_of_tables;
+ bool do_aggreFunc = g_Dbs.do_aggreFunc;
+
+ int n = do_aggreFunc ? (sizeof(g_aggreFunc) / sizeof(g_aggreFunc[0])) : 2;
+ if (!do_aggreFunc) {
+ printf("\nThe first field is either Binary or Bool. Aggregation functions are not supported.\n");
+ }
+ printf("%"PRId64" records:\n", totalData);
+ fprintf(fp, "| QFunctions | QRecords | QSpeed(R/s) | QLatency(ms) |\n");
+
+ for (int j = 0; j < n; j++) {
+ double totalT = 0;
+ uint64_t count = 0;
+ for (int64_t i = 0; i < num_of_tables; i++) {
+ sprintf(command, "select %s from %s%"PRId64" where ts>= %" PRIu64,
+ g_aggreFunc[j], tb_prefix, i, sTime);
+
+ double t = taosGetTimestampMs();
+ TAOS_RES *pSql = taos_query(taos, command);
+ int32_t code = taos_errno(pSql);
+
+ if (code != 0) {
+ errorPrint( "Failed to query:%s\n", taos_errstr(pSql));
+ taos_free_result(pSql);
+ taos_close(taos);
+ fclose(fp);
+ return NULL;
+ }
- t = taosGetTimestampMs() - t;
- totalT += t;
+ while(taos_fetch_row(pSql) != NULL) {
+ count++;
+ }
- taos_free_result(pSql);
- }
+ t = taosGetTimestampMs() - t;
+ totalT += t;
- fprintf(fp, "|%10s | %"PRId64" | %12.2f | %10.2f |\n",
- aggreFunc[j][0] == '*' ? " * " : aggreFunc[j], totalData,
- (double)(num_of_tables * num_of_DPT) / totalT, totalT * 1000);
- printf("select %10s took %.6f second(s)\n", aggreFunc[j], totalT * 1000);
- }
- fprintf(fp, "\n");
- fclose(fp);
+ taos_free_result(pSql);
+ }
+
+ fprintf(fp, "|%10s | %"PRId64" | %12.2f | %10.2f |\n",
+ g_aggreFunc[j][0] == '*' ? " * " : g_aggreFunc[j], totalData,
+ (double)(num_of_tables * num_of_DPT) / totalT, totalT * 1000);
+ printf("select %10s took %.6f second(s)\n", g_aggreFunc[j], totalT * 1000);
+ }
+ fprintf(fp, "\n");
+ fclose(fp);
#endif
- return NULL;
+ return NULL;
}
static void *readMetric(void *sarg) {
#if 1
- threadInfo *pThreadInfo = (threadInfo *)sarg;
- TAOS *taos = pThreadInfo->taos;
- setThreadName("readMetric");
- char command[BUFFER_SIZE] = "\0";
- FILE *fp = fopen(pThreadInfo->filePath, "a");
- if (NULL == fp) {
- printf("fopen %s fail, reason:%s.\n", pThreadInfo->filePath, strerror(errno));
- return NULL;
- }
+ threadInfo *pThreadInfo = (threadInfo *)sarg;
+ TAOS *taos = pThreadInfo->taos;
+ setThreadName("readMetric");
+ char command[BUFFER_SIZE] = "\0";
+ FILE *fp = fopen(pThreadInfo->filePath, "a");
+ if (NULL == fp) {
+ printf("fopen %s fail, reason:%s.\n", pThreadInfo->filePath, strerror(errno));
+ return NULL;
+ }
- int64_t num_of_DPT = pThreadInfo->superTblInfo->insertRows;
- int64_t num_of_tables = pThreadInfo->ntables; // rinfo->end_table_to - rinfo->start_table_from + 1;
- int64_t totalData = num_of_DPT * num_of_tables;
- bool do_aggreFunc = g_Dbs.do_aggreFunc;
+ int64_t num_of_DPT = pThreadInfo->superTblInfo->insertRows;
+ int64_t num_of_tables = pThreadInfo->ntables; // rinfo->end_table_to - rinfo->start_table_from + 1;
+ int64_t totalData = num_of_DPT * num_of_tables;
+ bool do_aggreFunc = g_Dbs.do_aggreFunc;
- int n = do_aggreFunc ? (sizeof(aggreFunc) / sizeof(aggreFunc[0])) : 2;
- if (!do_aggreFunc) {
- printf("\nThe first field is either Binary or Bool. Aggregation functions are not supported.\n");
- }
- printf("%"PRId64" records:\n", totalData);
- fprintf(fp, "Querying On %"PRId64" records:\n", totalData);
+ int n = do_aggreFunc ? (sizeof(g_aggreFunc) / sizeof(g_aggreFunc[0])) : 2;
+ if (!do_aggreFunc) {
+ printf("\nThe first field is either Binary or Bool. Aggregation functions are not supported.\n");
+ }
+ printf("%"PRId64" records:\n", totalData);
+ fprintf(fp, "Querying On %"PRId64" records:\n", totalData);
- for (int j = 0; j < n; j++) {
- char condition[COND_BUF_LEN] = "\0";
- char tempS[64] = "\0";
+ for (int j = 0; j < n; j++) {
+ char condition[COND_BUF_LEN] = "\0";
+ char tempS[64] = "\0";
- int64_t m = 10 < num_of_tables ? 10 : num_of_tables;
+ int64_t m = 10 < num_of_tables ? 10 : num_of_tables;
- for (int64_t i = 1; i <= m; i++) {
- if (i == 1) {
- sprintf(tempS, "t1 = %"PRId64"", i);
- } else {
- sprintf(tempS, " or t1 = %"PRId64" ", i);
- }
- strncat(condition, tempS, COND_BUF_LEN - 1);
+ for (int64_t i = 1; i <= m; i++) {
+ if (i == 1) {
+ sprintf(tempS, "t1 = %"PRId64"", i);
+ } else {
+ sprintf(tempS, " or t1 = %"PRId64" ", i);
+ }
+ strncat(condition, tempS, COND_BUF_LEN - 1);
- sprintf(command, "select %s from meters where %s", aggreFunc[j], condition);
+ sprintf(command, "select %s from meters where %s", g_aggreFunc[j], condition);
- printf("Where condition: %s\n", condition);
- fprintf(fp, "%s\n", command);
+ printf("Where condition: %s\n", condition);
+ fprintf(fp, "%s\n", command);
- double t = taosGetTimestampMs();
+ double t = taosGetTimestampMs();
- TAOS_RES *pSql = taos_query(taos, command);
- int32_t code = taos_errno(pSql);
+ TAOS_RES *pSql = taos_query(taos, command);
+ int32_t code = taos_errno(pSql);
- if (code != 0) {
- errorPrint( "Failed to query:%s\n", taos_errstr(pSql));
- taos_free_result(pSql);
- taos_close(taos);
- fclose(fp);
- return NULL;
- }
- int count = 0;
- while(taos_fetch_row(pSql) != NULL) {
- count++;
- }
- t = taosGetTimestampMs() - t;
+ if (code != 0) {
+ errorPrint( "Failed to query:%s\n", taos_errstr(pSql));
+ taos_free_result(pSql);
+ taos_close(taos);
+ fclose(fp);
+ return NULL;
+ }
+ int count = 0;
+ while(taos_fetch_row(pSql) != NULL) {
+ count++;
+ }
+ t = taosGetTimestampMs() - t;
- fprintf(fp, "| Speed: %12.2f(per s) | Latency: %.4f(ms) |\n",
- num_of_tables * num_of_DPT / (t * 1000.0), t);
- printf("select %10s took %.6f second(s)\n\n", aggreFunc[j], t * 1000.0);
+ fprintf(fp, "| Speed: %12.2f(per s) | Latency: %.4f(ms) |\n",
+ num_of_tables * num_of_DPT / (t * 1000.0), t);
+ printf("select %10s took %.6f second(s)\n\n", g_aggreFunc[j], t * 1000.0);
- taos_free_result(pSql);
+ taos_free_result(pSql);
+ }
+ fprintf(fp, "\n");
}
- fprintf(fp, "\n");
- }
- fclose(fp);
+ fclose(fp);
#endif
- return NULL;
+ return NULL;
}
static void prompt()
{
- if (!g_args.answer_yes) {
- printf(" Press enter key to continue or Ctrl-C to stop\n\n");
- (void)getchar();
- }
+ if (!g_args.answer_yes) {
+ printf(" Press enter key to continue or Ctrl-C to stop\n\n");
+ (void)getchar();
+ }
}
static int insertTestProcess() {
@@ -7264,369 +7503,369 @@ static int insertTestProcess() {
}
static void *specifiedTableQuery(void *sarg) {
- threadInfo *pThreadInfo = (threadInfo *)sarg;
+ threadInfo *pThreadInfo = (threadInfo *)sarg;
- setThreadName("specTableQuery");
+ setThreadName("specTableQuery");
- if (pThreadInfo->taos == NULL) {
- TAOS * taos = NULL;
- taos = taos_connect(g_queryInfo.host,
- g_queryInfo.user,
- g_queryInfo.password,
- NULL,
- g_queryInfo.port);
- if (taos == NULL) {
- errorPrint("[%d] Failed to connect to TDengine, reason:%s\n",
- pThreadInfo->threadID, taos_errstr(NULL));
- return NULL;
- } else {
- pThreadInfo->taos = taos;
+ if (pThreadInfo->taos == NULL) {
+ TAOS * taos = NULL;
+ taos = taos_connect(g_queryInfo.host,
+ g_queryInfo.user,
+ g_queryInfo.password,
+ NULL,
+ g_queryInfo.port);
+ if (taos == NULL) {
+ errorPrint("[%d] Failed to connect to TDengine, reason:%s\n",
+ pThreadInfo->threadID, taos_errstr(NULL));
+ return NULL;
+ } else {
+ pThreadInfo->taos = taos;
+ }
}
- }
- char sqlStr[TSDB_DB_NAME_LEN + 5];
- sprintf(sqlStr, "use %s", g_queryInfo.dbName);
- if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) {
- taos_close(pThreadInfo->taos);
- errorPrint( "use database %s failed!\n\n",
+ char sqlStr[TSDB_DB_NAME_LEN + 5];
+ sprintf(sqlStr, "use %s", g_queryInfo.dbName);
+ if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) {
+ taos_close(pThreadInfo->taos);
+ errorPrint( "use database %s failed!\n\n",
g_queryInfo.dbName);
- return NULL;
- }
+ return NULL;
+ }
- uint64_t st = 0;
- uint64_t et = 0;
+ uint64_t st = 0;
+ uint64_t et = 0;
- uint64_t queryTimes = g_queryInfo.specifiedQueryInfo.queryTimes;
+ uint64_t queryTimes = g_queryInfo.specifiedQueryInfo.queryTimes;
- uint64_t totalQueried = 0;
- uint64_t lastPrintTime = taosGetTimestampMs();
- uint64_t startTs = taosGetTimestampMs();
+ uint64_t totalQueried = 0;
+ uint64_t lastPrintTime = taosGetTimestampMs();
+ uint64_t startTs = taosGetTimestampMs();
- if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != '\0') {
- sprintf(pThreadInfo->filePath, "%s-%d",
+ if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != '\0') {
+ sprintf(pThreadInfo->filePath, "%s-%d",
g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq],
pThreadInfo->threadID);
- }
-
- while(queryTimes --) {
- if (g_queryInfo.specifiedQueryInfo.queryInterval && (et - st) <
- (int64_t)g_queryInfo.specifiedQueryInfo.queryInterval) {
- taosMsleep(g_queryInfo.specifiedQueryInfo.queryInterval - (et - st)); // ms
}
- st = taosGetTimestampMs();
+ while(queryTimes --) {
+ if (g_queryInfo.specifiedQueryInfo.queryInterval && (et - st) <
+ (int64_t)g_queryInfo.specifiedQueryInfo.queryInterval) {
+ taosMsleep(g_queryInfo.specifiedQueryInfo.queryInterval - (et - st)); // ms
+ }
+
+ st = taosGetTimestampMs();
- selectAndGetResult(pThreadInfo,
- g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq]);
+ selectAndGetResult(pThreadInfo,
+ g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq]);
- et = taosGetTimestampMs();
- printf("=thread[%"PRId64"] use %s complete one sql, Spent %10.3f s\n",
- taosGetSelfPthreadId(), g_queryInfo.queryMode, (et - st)/1000.0);
+ et = taosGetTimestampMs();
+        printf("=thread[%"PRId64"] used %s to complete one sql, spent %10.3f s\n",
+ taosGetSelfPthreadId(), g_queryInfo.queryMode, (et - st)/1000.0);
- totalQueried ++;
- g_queryInfo.specifiedQueryInfo.totalQueried ++;
+ totalQueried ++;
+ g_queryInfo.specifiedQueryInfo.totalQueried ++;
- uint64_t currentPrintTime = taosGetTimestampMs();
- uint64_t endTs = taosGetTimestampMs();
- if (currentPrintTime - lastPrintTime > 30*1000) {
- debugPrint("%s() LN%d, endTs=%"PRIu64"ms, startTs=%"PRIu64"ms\n",
- __func__, __LINE__, endTs, startTs);
- printf("thread[%d] has currently completed queries: %"PRIu64", QPS: %10.6f\n",
+ uint64_t currentPrintTime = taosGetTimestampMs();
+ uint64_t endTs = taosGetTimestampMs();
+ if (currentPrintTime - lastPrintTime > 30*1000) {
+ debugPrint("%s() LN%d, endTs=%"PRIu64"ms, startTs=%"PRIu64"ms\n",
+ __func__, __LINE__, endTs, startTs);
+ printf("thread[%d] has currently completed queries: %"PRIu64", QPS: %10.6f\n",
pThreadInfo->threadID,
totalQueried,
(double)(totalQueried/((endTs-startTs)/1000.0)));
- lastPrintTime = currentPrintTime;
+ lastPrintTime = currentPrintTime;
+ }
}
- }
- return NULL;
+ return NULL;
}
static void replaceChildTblName(char* inSql, char* outSql, int tblIndex) {
- char sourceString[32] = "xxxx";
- char subTblName[MAX_TB_NAME_SIZE*3];
- sprintf(subTblName, "%s.%s",
- g_queryInfo.dbName,
- g_queryInfo.superQueryInfo.childTblName + tblIndex*TSDB_TABLE_NAME_LEN);
+ char sourceString[32] = "xxxx";
+ char subTblName[TSDB_TABLE_NAME_LEN];
+ sprintf(subTblName, "%s.%s",
+ g_queryInfo.dbName,
+ g_queryInfo.superQueryInfo.childTblName + tblIndex*TSDB_TABLE_NAME_LEN);
- //printf("inSql: %s\n", inSql);
+ //printf("inSql: %s\n", inSql);
- char* pos = strstr(inSql, sourceString);
- if (0 == pos) {
- return;
- }
-
- tstrncpy(outSql, inSql, pos - inSql + 1);
- //printf("1: %s\n", outSql);
- strncat(outSql, subTblName, MAX_QUERY_SQL_LENGTH - 1);
- //printf("2: %s\n", outSql);
- strncat(outSql, pos+strlen(sourceString), MAX_QUERY_SQL_LENGTH - 1);
- //printf("3: %s\n", outSql);
+ char* pos = strstr(inSql, sourceString);
+ if (0 == pos) {
+ return;
+ }
+
+ tstrncpy(outSql, inSql, pos - inSql + 1);
+ //printf("1: %s\n", outSql);
+ strncat(outSql, subTblName, BUFFER_SIZE - 1);
+ //printf("2: %s\n", outSql);
+ strncat(outSql, pos+strlen(sourceString), BUFFER_SIZE - 1);
+ //printf("3: %s\n", outSql);
}
static void *superTableQuery(void *sarg) {
- char sqlstr[MAX_QUERY_SQL_LENGTH];
- threadInfo *pThreadInfo = (threadInfo *)sarg;
+ char sqlstr[BUFFER_SIZE];
+ threadInfo *pThreadInfo = (threadInfo *)sarg;
- setThreadName("superTableQuery");
+ setThreadName("superTableQuery");
- if (pThreadInfo->taos == NULL) {
- TAOS * taos = NULL;
- taos = taos_connect(g_queryInfo.host,
- g_queryInfo.user,
- g_queryInfo.password,
- NULL,
- g_queryInfo.port);
- if (taos == NULL) {
- errorPrint("[%d] Failed to connect to TDengine, reason:%s\n",
- pThreadInfo->threadID, taos_errstr(NULL));
- return NULL;
- } else {
- pThreadInfo->taos = taos;
+ if (pThreadInfo->taos == NULL) {
+ TAOS * taos = NULL;
+ taos = taos_connect(g_queryInfo.host,
+ g_queryInfo.user,
+ g_queryInfo.password,
+ NULL,
+ g_queryInfo.port);
+ if (taos == NULL) {
+ errorPrint("[%d] Failed to connect to TDengine, reason:%s\n",
+ pThreadInfo->threadID, taos_errstr(NULL));
+ return NULL;
+ } else {
+ pThreadInfo->taos = taos;
+ }
}
- }
- uint64_t st = 0;
- uint64_t et = (int64_t)g_queryInfo.superQueryInfo.queryInterval;
+ uint64_t st = 0;
+ uint64_t et = (int64_t)g_queryInfo.superQueryInfo.queryInterval;
- uint64_t queryTimes = g_queryInfo.superQueryInfo.queryTimes;
- uint64_t totalQueried = 0;
- uint64_t startTs = taosGetTimestampMs();
+ uint64_t queryTimes = g_queryInfo.superQueryInfo.queryTimes;
+ uint64_t totalQueried = 0;
+ uint64_t startTs = taosGetTimestampMs();
- uint64_t lastPrintTime = taosGetTimestampMs();
- while(queryTimes --) {
- if (g_queryInfo.superQueryInfo.queryInterval
- && (et - st) < (int64_t)g_queryInfo.superQueryInfo.queryInterval) {
- taosMsleep(g_queryInfo.superQueryInfo.queryInterval - (et - st)); // ms
- //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, pThreadInfo->start_table_from, pThreadInfo->end_table_to);
- }
-
- st = taosGetTimestampMs();
- for (int i = pThreadInfo->start_table_from; i <= pThreadInfo->end_table_to; i++) {
- for (int j = 0; j < g_queryInfo.superQueryInfo.sqlCount; j++) {
- memset(sqlstr,0,sizeof(sqlstr));
- replaceChildTblName(g_queryInfo.superQueryInfo.sql[j], sqlstr, i);
- if (g_queryInfo.superQueryInfo.result[j][0] != '\0') {
- sprintf(pThreadInfo->filePath, "%s-%d",
- g_queryInfo.superQueryInfo.result[j],
- pThreadInfo->threadID);
- }
- selectAndGetResult(pThreadInfo, sqlstr);
+ uint64_t lastPrintTime = taosGetTimestampMs();
+ while(queryTimes --) {
+ if (g_queryInfo.superQueryInfo.queryInterval
+ && (et - st) < (int64_t)g_queryInfo.superQueryInfo.queryInterval) {
+ taosMsleep(g_queryInfo.superQueryInfo.queryInterval - (et - st)); // ms
+ //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, pThreadInfo->start_table_from, pThreadInfo->end_table_to);
+ }
+
+ st = taosGetTimestampMs();
+ for (int i = pThreadInfo->start_table_from; i <= pThreadInfo->end_table_to; i++) {
+ for (int j = 0; j < g_queryInfo.superQueryInfo.sqlCount; j++) {
+ memset(sqlstr,0,sizeof(sqlstr));
+ replaceChildTblName(g_queryInfo.superQueryInfo.sql[j], sqlstr, i);
+ if (g_queryInfo.superQueryInfo.result[j][0] != '\0') {
+ sprintf(pThreadInfo->filePath, "%s-%d",
+ g_queryInfo.superQueryInfo.result[j],
+ pThreadInfo->threadID);
+ }
+ selectAndGetResult(pThreadInfo, sqlstr);
- totalQueried++;
- g_queryInfo.superQueryInfo.totalQueried ++;
+ totalQueried++;
+ g_queryInfo.superQueryInfo.totalQueried ++;
- int64_t currentPrintTime = taosGetTimestampMs();
- int64_t endTs = taosGetTimestampMs();
- if (currentPrintTime - lastPrintTime > 30*1000) {
- printf("thread[%d] has currently completed queries: %"PRIu64", QPS: %10.3f\n",
- pThreadInfo->threadID,
- totalQueried,
- (double)(totalQueried/((endTs-startTs)/1000.0)));
- lastPrintTime = currentPrintTime;
+ int64_t currentPrintTime = taosGetTimestampMs();
+ int64_t endTs = taosGetTimestampMs();
+ if (currentPrintTime - lastPrintTime > 30*1000) {
+ printf("thread[%d] has currently completed queries: %"PRIu64", QPS: %10.3f\n",
+ pThreadInfo->threadID,
+ totalQueried,
+ (double)(totalQueried/((endTs-startTs)/1000.0)));
+ lastPrintTime = currentPrintTime;
+ }
+ }
}
- }
+ et = taosGetTimestampMs();
+        printf("####thread[%"PRId64"] completed one round of all sqls over sub-tables[%"PRIu64" - %"PRIu64"], duration:%.4fs\n\n",
+ taosGetSelfPthreadId(),
+ pThreadInfo->start_table_from,
+ pThreadInfo->end_table_to,
+ (double)(et - st)/1000.0);
}
- et = taosGetTimestampMs();
- printf("####thread[%"PRId64"] complete all sqls to allocate all sub-tables[%"PRIu64" - %"PRIu64"] once queries duration:%.4fs\n\n",
- taosGetSelfPthreadId(),
- pThreadInfo->start_table_from,
- pThreadInfo->end_table_to,
- (double)(et - st)/1000.0);
- }
- return NULL;
+ return NULL;
}
static int queryTestProcess() {
- setupForAnsiEscape();
- printfQueryMeta();
- resetAfterAnsiEscape();
-
- TAOS * taos = NULL;
- taos = taos_connect(g_queryInfo.host,
- g_queryInfo.user,
- g_queryInfo.password,
- NULL,
- g_queryInfo.port);
- if (taos == NULL) {
- errorPrint( "Failed to connect to TDengine, reason:%s\n",
- taos_errstr(NULL));
- exit(-1);
- }
-
- if (0 != g_queryInfo.superQueryInfo.sqlCount) {
- getAllChildNameOfSuperTable(taos,
- g_queryInfo.dbName,
- g_queryInfo.superQueryInfo.sTblName,
- &g_queryInfo.superQueryInfo.childTblName,
- &g_queryInfo.superQueryInfo.childTblCount);
- }
+ setupForAnsiEscape();
+ printfQueryMeta();
+ resetAfterAnsiEscape();
- prompt();
+ TAOS * taos = NULL;
+ taos = taos_connect(g_queryInfo.host,
+ g_queryInfo.user,
+ g_queryInfo.password,
+ NULL,
+ g_queryInfo.port);
+ if (taos == NULL) {
+ errorPrint( "Failed to connect to TDengine, reason:%s\n",
+ taos_errstr(NULL));
+ exit(-1);
+ }
+
+ if (0 != g_queryInfo.superQueryInfo.sqlCount) {
+ getAllChildNameOfSuperTable(taos,
+ g_queryInfo.dbName,
+ g_queryInfo.superQueryInfo.sTblName,
+ &g_queryInfo.superQueryInfo.childTblName,
+ &g_queryInfo.superQueryInfo.childTblCount);
+ }
+
+ prompt();
- if (g_args.debug_print || g_args.verbose_print) {
- printfQuerySystemInfo(taos);
- }
+ if (g_args.debug_print || g_args.verbose_print) {
+ printfQuerySystemInfo(taos);
+ }
- if (0 == strncasecmp(g_queryInfo.queryMode, "rest", strlen("rest"))) {
- if (convertHostToServAddr(
- g_queryInfo.host, g_queryInfo.port, &g_queryInfo.serv_addr) != 0)
- exit(-1);
- }
+ if (0 == strncasecmp(g_queryInfo.queryMode, "rest", strlen("rest"))) {
+ if (convertHostToServAddr(
+ g_queryInfo.host, g_queryInfo.port, &g_queryInfo.serv_addr) != 0)
+ exit(-1);
+ }
- pthread_t *pids = NULL;
- threadInfo *infos = NULL;
- //==== create sub threads for query from specify table
- int nConcurrent = g_queryInfo.specifiedQueryInfo.concurrent;
- uint64_t nSqlCount = g_queryInfo.specifiedQueryInfo.sqlCount;
+ pthread_t *pids = NULL;
+ threadInfo *infos = NULL;
+ //==== create sub threads for query from specify table
+ int nConcurrent = g_queryInfo.specifiedQueryInfo.concurrent;
+ uint64_t nSqlCount = g_queryInfo.specifiedQueryInfo.sqlCount;
- uint64_t startTs = taosGetTimestampMs();
+ uint64_t startTs = taosGetTimestampMs();
- if ((nSqlCount > 0) && (nConcurrent > 0)) {
+ if ((nSqlCount > 0) && (nConcurrent > 0)) {
- pids = calloc(1, nConcurrent * nSqlCount * sizeof(pthread_t));
- infos = calloc(1, nConcurrent * nSqlCount * sizeof(threadInfo));
+ pids = calloc(1, nConcurrent * nSqlCount * sizeof(pthread_t));
+ infos = calloc(1, nConcurrent * nSqlCount * sizeof(threadInfo));
- if ((NULL == pids) || (NULL == infos)) {
- taos_close(taos);
- ERROR_EXIT("memory allocation failed for create threads\n");
- }
+ if ((NULL == pids) || (NULL == infos)) {
+ taos_close(taos);
+ ERROR_EXIT("memory allocation failed for create threads\n");
+ }
- for (uint64_t i = 0; i < nSqlCount; i++) {
- for (int j = 0; j < nConcurrent; j++) {
- uint64_t seq = i * nConcurrent + j;
- threadInfo *pThreadInfo = infos + seq;
- pThreadInfo->threadID = seq;
- pThreadInfo->querySeq = i;
+ for (uint64_t i = 0; i < nSqlCount; i++) {
+ for (int j = 0; j < nConcurrent; j++) {
+ uint64_t seq = i * nConcurrent + j;
+ threadInfo *pThreadInfo = infos + seq;
+ pThreadInfo->threadID = seq;
+ pThreadInfo->querySeq = i;
- if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) {
+ if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) {
- char sqlStr[MAX_TB_NAME_SIZE*2];
- sprintf(sqlStr, "use %s", g_queryInfo.dbName);
- if (0 != queryDbExec(taos, sqlStr, NO_INSERT_TYPE, false)) {
- taos_close(taos);
- free(infos);
- free(pids);
- errorPrint( "use database %s failed!\n\n",
- g_queryInfo.dbName);
- return -1;
+ char sqlStr[TSDB_DB_NAME_LEN + 5];
+ sprintf(sqlStr, "USE %s", g_queryInfo.dbName);
+ if (0 != queryDbExec(taos, sqlStr, NO_INSERT_TYPE, false)) {
+ taos_close(taos);
+ free(infos);
+ free(pids);
+ errorPrint( "use database %s failed!\n\n",
+ g_queryInfo.dbName);
+ return -1;
+ }
}
- }
- pThreadInfo->taos = NULL;// TODO: workaround to use separate taos connection;
+ pThreadInfo->taos = NULL;// TODO: workaround to use separate taos connection;
- pthread_create(pids + seq, NULL, specifiedTableQuery,
- pThreadInfo);
+ pthread_create(pids + seq, NULL, specifiedTableQuery,
+ pThreadInfo);
+ }
}
+ } else {
+ g_queryInfo.specifiedQueryInfo.concurrent = 0;
}
- } else {
- g_queryInfo.specifiedQueryInfo.concurrent = 0;
- }
- taos_close(taos);
+ taos_close(taos);
- pthread_t *pidsOfSub = NULL;
- threadInfo *infosOfSub = NULL;
- //==== create sub threads for query from all sub table of the super table
- if ((g_queryInfo.superQueryInfo.sqlCount > 0)
- && (g_queryInfo.superQueryInfo.threadCnt > 0)) {
- pidsOfSub = calloc(1, g_queryInfo.superQueryInfo.threadCnt * sizeof(pthread_t));
- infosOfSub = calloc(1, g_queryInfo.superQueryInfo.threadCnt * sizeof(threadInfo));
+ pthread_t *pidsOfSub = NULL;
+ threadInfo *infosOfSub = NULL;
+ //==== create sub threads for query from all sub table of the super table
+ if ((g_queryInfo.superQueryInfo.sqlCount > 0)
+ && (g_queryInfo.superQueryInfo.threadCnt > 0)) {
+ pidsOfSub = calloc(1, g_queryInfo.superQueryInfo.threadCnt * sizeof(pthread_t));
+ infosOfSub = calloc(1, g_queryInfo.superQueryInfo.threadCnt * sizeof(threadInfo));
- if ((NULL == pidsOfSub) || (NULL == infosOfSub)) {
- free(infos);
- free(pids);
+ if ((NULL == pidsOfSub) || (NULL == infosOfSub)) {
+ free(infos);
+ free(pids);
- ERROR_EXIT("memory allocation failed for create threads\n");
- }
+ ERROR_EXIT("memory allocation failed for create threads\n");
+ }
- int64_t ntables = g_queryInfo.superQueryInfo.childTblCount;
- int threads = g_queryInfo.superQueryInfo.threadCnt;
+ int64_t ntables = g_queryInfo.superQueryInfo.childTblCount;
+ int threads = g_queryInfo.superQueryInfo.threadCnt;
- int64_t a = ntables / threads;
- if (a < 1) {
- threads = ntables;
- a = 1;
- }
+ int64_t a = ntables / threads;
+ if (a < 1) {
+ threads = ntables;
+ a = 1;
+ }
- int64_t b = 0;
- if (threads != 0) {
- b = ntables % threads;
- }
+ int64_t b = 0;
+ if (threads != 0) {
+ b = ntables % threads;
+ }
- uint64_t tableFrom = 0;
- for (int i = 0; i < threads; i++) {
- threadInfo *pThreadInfo = infosOfSub + i;
- pThreadInfo->threadID = i;
+ uint64_t tableFrom = 0;
+ for (int i = 0; i < threads; i++) {
+ threadInfo *pThreadInfo = infosOfSub + i;
+ pThreadInfo->threadID = i;
- pThreadInfo->start_table_from = tableFrom;
- pThreadInfo->ntables = i < b ? a + 1 : a;
- pThreadInfo->end_table_to = i < b ? tableFrom + a : tableFrom + a - 1;
- tableFrom = pThreadInfo->end_table_to + 1;
- pThreadInfo->taos = NULL; // TODO: workaround to use separate taos connection;
- pthread_create(pidsOfSub + i, NULL, superTableQuery, pThreadInfo);
- }
+ pThreadInfo->start_table_from = tableFrom;
+ pThreadInfo->ntables = i < b ? a + 1 : a;
+ pThreadInfo->end_table_to = i < b ? tableFrom + a : tableFrom + a - 1;
+ tableFrom = pThreadInfo->end_table_to + 1;
+ pThreadInfo->taos = NULL; // TODO: workaround to use separate taos connection;
+ pthread_create(pidsOfSub + i, NULL, superTableQuery, pThreadInfo);
+ }
- g_queryInfo.superQueryInfo.threadCnt = threads;
- } else {
- g_queryInfo.superQueryInfo.threadCnt = 0;
- }
+ g_queryInfo.superQueryInfo.threadCnt = threads;
+ } else {
+ g_queryInfo.superQueryInfo.threadCnt = 0;
+ }
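
Note on the block above: child tables are split across query threads with a quotient/remainder scheme, after first clamping the thread count to the table count when there are fewer tables than threads. A minimal standalone sketch of the same arithmetic (the TableRange struct and function names are illustrative, not part of taosdemo.c):

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative holder; taosdemo keeps these fields inside threadInfo. */
    typedef struct { uint64_t from; uint64_t to; int64_t ntables; } TableRange;

    static void splitTables(int64_t ntables, int threads, TableRange *ranges) {
        int64_t a = ntables / threads;   /* base share per thread */
        int64_t b = ntables % threads;   /* the first b threads take one extra table */
        uint64_t tableFrom = 0;
        for (int i = 0; i < threads; i++) {
            ranges[i].ntables = (i < b) ? a + 1 : a;
            ranges[i].from = tableFrom;
            ranges[i].to = (i < b) ? tableFrom + a : tableFrom + a - 1;
            tableFrom = ranges[i].to + 1;
        }
    }

    int main(void) {
        TableRange r[4];
        splitTables(10, 4, r);   /* 10 tables over 4 threads -> 3, 3, 2, 2 */
        for (int i = 0; i < 4; i++)
            printf("thread %d: tables [%llu - %llu]\n", i,
                   (unsigned long long)r[i].from, (unsigned long long)r[i].to);
        return 0;
    }
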
- if ((nSqlCount > 0) && (nConcurrent > 0)) {
- for (int i = 0; i < nConcurrent; i++) {
- for (int j = 0; j < nSqlCount; j++) {
- pthread_join(pids[i * nSqlCount + j], NULL);
- }
+ if ((nSqlCount > 0) && (nConcurrent > 0)) {
+ for (int i = 0; i < nConcurrent; i++) {
+ for (int j = 0; j < nSqlCount; j++) {
+ pthread_join(pids[i * nSqlCount + j], NULL);
+ }
+ }
}
- }
- tmfree((char*)pids);
- tmfree((char*)infos);
+ tmfree((char*)pids);
+ tmfree((char*)infos);
- for (int i = 0; i < g_queryInfo.superQueryInfo.threadCnt; i++) {
- pthread_join(pidsOfSub[i], NULL);
- }
+ for (int i = 0; i < g_queryInfo.superQueryInfo.threadCnt; i++) {
+ pthread_join(pidsOfSub[i], NULL);
+ }
- tmfree((char*)pidsOfSub);
- tmfree((char*)infosOfSub);
+ tmfree((char*)pidsOfSub);
+ tmfree((char*)infosOfSub);
-// taos_close(taos);// TODO: workaround to use separate taos connection;
- uint64_t endTs = taosGetTimestampMs();
+ // taos_close(taos);// TODO: workaround to use separate taos connection;
+ uint64_t endTs = taosGetTimestampMs();
- uint64_t totalQueried = g_queryInfo.specifiedQueryInfo.totalQueried +
- g_queryInfo.superQueryInfo.totalQueried;
+ uint64_t totalQueried = g_queryInfo.specifiedQueryInfo.totalQueried +
+ g_queryInfo.superQueryInfo.totalQueried;
- fprintf(stderr, "==== completed total queries: %"PRIu64", the QPS of all threads: %10.3f====\n",
- totalQueried,
- (double)(totalQueried/((endTs-startTs)/1000.0)));
- return 0;
+ fprintf(stderr, "==== completed total queries: %"PRIu64", the QPS of all threads: %10.3f====\n",
+ totalQueried,
+ (double)(totalQueried/((endTs-startTs)/1000.0)));
+ return 0;
}
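
The QPS figure printed at the end of queryTestProcess is simply completed queries divided by elapsed wall-clock seconds; the cast to double before dividing keeps sub-second runs from truncating to zero. A tiny hedged sketch of that arithmetic with made-up numbers:

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    int main(void) {
        /* Illustrative values: 500 queries completed over a 2000 ms window. */
        uint64_t startTs = 1000;   /* ms, as taosGetTimestampMs() would return */
        uint64_t endTs = 3000;     /* ms */
        uint64_t totalQueried = 500;
        /* Divide by elapsed seconds as a double so integer division
           does not truncate short runs to zero. */
        double qps = (double)totalQueried / ((endTs - startTs) / 1000.0);
        printf("==== completed total queries: %" PRIu64 ", QPS: %10.3f ====\n",
               totalQueried, qps);
        return 0;
    }
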
static void stable_sub_callback(
TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) {
- if (res == NULL || taos_errno(res) != 0) {
- errorPrint("%s() LN%d, failed to subscribe result, code:%d, reason:%s\n",
- __func__, __LINE__, code, taos_errstr(res));
- return;
- }
+ if (res == NULL || taos_errno(res) != 0) {
+ errorPrint("%s() LN%d, failed to subscribe result, code:%d, reason:%s\n",
+ __func__, __LINE__, code, taos_errstr(res));
+ return;
+ }
- if (param)
- fetchResult(res, (threadInfo *)param);
- // tao_unscribe() will free result.
+ if (param)
+ fetchResult(res, (threadInfo *)param);
+ // tao_unscribe() will free result.
}
static void specified_sub_callback(
TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) {
- if (res == NULL || taos_errno(res) != 0) {
- errorPrint("%s() LN%d, failed to subscribe result, code:%d, reason:%s\n",
- __func__, __LINE__, code, taos_errstr(res));
- return;
- }
+ if (res == NULL || taos_errno(res) != 0) {
+ errorPrint("%s() LN%d, failed to subscribe result, code:%d, reason:%s\n",
+ __func__, __LINE__, code, taos_errstr(res));
+ return;
+ }
- if (param)
- fetchResult(res, (threadInfo *)param);
- // tao_unscribe() will free result.
+ if (param)
+ fetchResult(res, (threadInfo *)param);
+ // tao_unscribe() will free result.
}
static TAOS_SUB* subscribeImpl(
@@ -7634,40 +7873,40 @@ static TAOS_SUB* subscribeImpl(
threadInfo *pThreadInfo,
char *sql, char* topic, bool restart, uint64_t interval)
{
- TAOS_SUB* tsub = NULL;
-
- if ((SPECIFIED_CLASS == class)
- && (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode)) {
- tsub = taos_subscribe(
- pThreadInfo->taos,
- restart,
- topic, sql, specified_sub_callback, (void*)pThreadInfo,
- g_queryInfo.specifiedQueryInfo.subscribeInterval);
- } else if ((STABLE_CLASS == class)
- && (ASYNC_MODE == g_queryInfo.superQueryInfo.asyncMode)) {
- tsub = taos_subscribe(
- pThreadInfo->taos,
- restart,
- topic, sql, stable_sub_callback, (void*)pThreadInfo,
- g_queryInfo.superQueryInfo.subscribeInterval);
- } else {
- tsub = taos_subscribe(
- pThreadInfo->taos,
- restart,
- topic, sql, NULL, NULL, interval);
- }
-
- if (tsub == NULL) {
- errorPrint("failed to create subscription. topic:%s, sql:%s\n", topic, sql);
- return NULL;
- }
-
- return tsub;
+ TAOS_SUB* tsub = NULL;
+
+ if ((SPECIFIED_CLASS == class)
+ && (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode)) {
+ tsub = taos_subscribe(
+ pThreadInfo->taos,
+ restart,
+ topic, sql, specified_sub_callback, (void*)pThreadInfo,
+ g_queryInfo.specifiedQueryInfo.subscribeInterval);
+ } else if ((STABLE_CLASS == class)
+ && (ASYNC_MODE == g_queryInfo.superQueryInfo.asyncMode)) {
+ tsub = taos_subscribe(
+ pThreadInfo->taos,
+ restart,
+ topic, sql, stable_sub_callback, (void*)pThreadInfo,
+ g_queryInfo.superQueryInfo.subscribeInterval);
+ } else {
+ tsub = taos_subscribe(
+ pThreadInfo->taos,
+ restart,
+ topic, sql, NULL, NULL, interval);
+ }
+
+ if (tsub == NULL) {
+ errorPrint("failed to create subscription. topic:%s, sql:%s\n", topic, sql);
+ return NULL;
+ }
+
+ return tsub;
}
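
subscribeImpl above only selects between the callback (async) and polling (sync) forms of taos_subscribe; the polling pattern that superSubscribe/specifiedSubscribe build on looks roughly like the sketch below. It uses only client calls that already appear in this diff (taos_subscribe, taos_consume, taos_fetch_row, taos_free_result, taos_unsubscribe); the topic, SQL, round count, and connection parameters are made up for illustration.

    #include <stdio.h>
    #include <taos.h>   /* TDengine client header */

    /* Hedged sketch of a synchronous subscription poll loop; error paths trimmed. */
    static void pollSubscription(TAOS *taos) {
        /* restart=1 ignores saved progress; the last argument is the minimum
           number of milliseconds between two consume calls. */
        TAOS_SUB *tsub = taos_subscribe(taos, 1, "demo-topic",
                                        "select * from test.meters", NULL, NULL, 1000);
        if (tsub == NULL) {
            fprintf(stderr, "failed to create subscription\n");
            return;
        }

        TAOS_RES *res = NULL;
        for (int round = 0; round < 10; round++) {
            res = taos_consume(tsub);      /* returns rows arrived since the last call */
            if (res == NULL) continue;
            TAOS_ROW row;
            while ((row = taos_fetch_row(res)) != NULL) {
                /* handle newly arrived rows here */
            }
        }
        if (res) taos_free_result(res);    /* freed once, as specifiedSubscribe does */
        taos_unsubscribe(tsub, 0);         /* 0: do not keep consume progress */
    }

    int main(void) {
        taos_init();
        TAOS *taos = taos_connect("127.0.0.1", "root", "taosdata", NULL, 0);
        if (taos == NULL) return 1;
        pollSubscription(taos);
        taos_close(taos);
        return 0;
    }
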
static void *superSubscribe(void *sarg) {
threadInfo *pThreadInfo = (threadInfo *)sarg;
- char subSqlstr[MAX_QUERY_SQL_LENGTH];
+ char subSqlstr[BUFFER_SIZE];
TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0};
uint64_t tsubSeq;
@@ -7692,8 +7931,8 @@ static void *superSubscribe(void *sarg) {
}
}
- char sqlStr[MAX_TB_NAME_SIZE*2];
- sprintf(sqlStr, "use %s", g_queryInfo.dbName);
+ char sqlStr[TSDB_DB_NAME_LEN + 5];
+ sprintf(sqlStr, "USE %s", g_queryInfo.dbName);
if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) {
taos_close(pThreadInfo->taos);
errorPrint( "use database %s failed!\n\n",
@@ -7816,291 +8055,291 @@ static void *superSubscribe(void *sarg) {
}
static void *specifiedSubscribe(void *sarg) {
- threadInfo *pThreadInfo = (threadInfo *)sarg;
-// TAOS_SUB* tsub = NULL;
+ threadInfo *pThreadInfo = (threadInfo *)sarg;
+ // TAOS_SUB* tsub = NULL;
- setThreadName("specSub");
+ setThreadName("specSub");
- if (pThreadInfo->taos == NULL) {
- pThreadInfo->taos = taos_connect(g_queryInfo.host,
- g_queryInfo.user,
- g_queryInfo.password,
- g_queryInfo.dbName,
- g_queryInfo.port);
if (pThreadInfo->taos == NULL) {
- errorPrint("[%d] Failed to connect to TDengine, reason:%s\n",
- pThreadInfo->threadID, taos_errstr(NULL));
- return NULL;
+ pThreadInfo->taos = taos_connect(g_queryInfo.host,
+ g_queryInfo.user,
+ g_queryInfo.password,
+ g_queryInfo.dbName,
+ g_queryInfo.port);
+ if (pThreadInfo->taos == NULL) {
+ errorPrint("[%d] Failed to connect to TDengine, reason:%s\n",
+ pThreadInfo->threadID, taos_errstr(NULL));
+ return NULL;
+ }
}
- }
- char sqlStr[MAX_TB_NAME_SIZE*2];
- sprintf(sqlStr, "use %s", g_queryInfo.dbName);
- if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) {
- taos_close(pThreadInfo->taos);
- return NULL;
- }
-
- sprintf(g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID],
- "taosdemo-subscribe-%"PRIu64"-%d",
- pThreadInfo->querySeq,
- pThreadInfo->threadID);
- if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != '\0') {
- sprintf(pThreadInfo->filePath, "%s-%d",
+ char sqlStr[TSDB_DB_NAME_LEN + 5];
+ sprintf(sqlStr, "USE %s", g_queryInfo.dbName);
+ if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) {
+ taos_close(pThreadInfo->taos);
+ return NULL;
+ }
+
+ sprintf(g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID],
+ "taosdemo-subscribe-%"PRIu64"-%d",
+ pThreadInfo->querySeq,
+ pThreadInfo->threadID);
+ if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != '\0') {
+ sprintf(pThreadInfo->filePath, "%s-%d",
g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq],
pThreadInfo->threadID);
- }
- g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID] = subscribeImpl(
- SPECIFIED_CLASS, pThreadInfo,
- g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq],
- g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID],
- g_queryInfo.specifiedQueryInfo.subscribeRestart,
- g_queryInfo.specifiedQueryInfo.subscribeInterval);
- if (NULL == g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID]) {
- taos_close(pThreadInfo->taos);
- return NULL;
- }
-
- // start loop to consume result
-
- g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] = 0;
- while((g_queryInfo.specifiedQueryInfo.endAfterConsume[pThreadInfo->querySeq] == -1)
- || (g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] <
- g_queryInfo.specifiedQueryInfo.endAfterConsume[pThreadInfo->querySeq])) {
-
- printf("consumed[%d]: %d, endAfterConsum[%"PRId64"]: %d\n",
- pThreadInfo->threadID,
- g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID],
- pThreadInfo->querySeq,
- g_queryInfo.specifiedQueryInfo.endAfterConsume[pThreadInfo->querySeq]);
- if (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode) {
- continue;
- }
-
- g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID] = taos_consume(
- g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID]);
- if (g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID]) {
- if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0]
- != 0) {
- sprintf(pThreadInfo->filePath, "%s-%d",
- g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq],
- pThreadInfo->threadID);
- }
- fetchResult(
- g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID],
- pThreadInfo);
-
- g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] ++;
- if ((g_queryInfo.specifiedQueryInfo.resubAfterConsume[pThreadInfo->querySeq] != -1)
- && (g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] >=
- g_queryInfo.specifiedQueryInfo.resubAfterConsume[pThreadInfo->querySeq])) {
- printf("keepProgress:%d, resub specified query: %"PRIu64"\n",
- g_queryInfo.specifiedQueryInfo.subscribeKeepProgress,
- pThreadInfo->querySeq);
- g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] = 0;
- taos_unsubscribe(g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID],
- g_queryInfo.specifiedQueryInfo.subscribeKeepProgress);
- g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID] =
- subscribeImpl(
- SPECIFIED_CLASS,
- pThreadInfo,
- g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq],
- g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID],
- g_queryInfo.specifiedQueryInfo.subscribeRestart,
- g_queryInfo.specifiedQueryInfo.subscribeInterval);
- if (NULL == g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID]) {
- taos_close(pThreadInfo->taos);
- return NULL;
- }
- }
- }
- }
- taos_free_result(g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID]);
- taos_close(pThreadInfo->taos);
+ }
+ g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID] = subscribeImpl(
+ SPECIFIED_CLASS, pThreadInfo,
+ g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq],
+ g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID],
+ g_queryInfo.specifiedQueryInfo.subscribeRestart,
+ g_queryInfo.specifiedQueryInfo.subscribeInterval);
+ if (NULL == g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID]) {
+ taos_close(pThreadInfo->taos);
+ return NULL;
+ }
+
+ // start loop to consume result
+
+ g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] = 0;
+ while((g_queryInfo.specifiedQueryInfo.endAfterConsume[pThreadInfo->querySeq] == -1)
+ || (g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] <
+ g_queryInfo.specifiedQueryInfo.endAfterConsume[pThreadInfo->querySeq])) {
+
+ printf("consumed[%d]: %d, endAfterConsum[%"PRId64"]: %d\n",
+ pThreadInfo->threadID,
+ g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID],
+ pThreadInfo->querySeq,
+ g_queryInfo.specifiedQueryInfo.endAfterConsume[pThreadInfo->querySeq]);
+ if (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode) {
+ continue;
+ }
+
+ g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID] = taos_consume(
+ g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID]);
+ if (g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID]) {
+ if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0]
+ != 0) {
+ sprintf(pThreadInfo->filePath, "%s-%d",
+ g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq],
+ pThreadInfo->threadID);
+ }
+ fetchResult(
+ g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID],
+ pThreadInfo);
+
+ g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] ++;
+ if ((g_queryInfo.specifiedQueryInfo.resubAfterConsume[pThreadInfo->querySeq] != -1)
+ && (g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] >=
+ g_queryInfo.specifiedQueryInfo.resubAfterConsume[pThreadInfo->querySeq])) {
+ printf("keepProgress:%d, resub specified query: %"PRIu64"\n",
+ g_queryInfo.specifiedQueryInfo.subscribeKeepProgress,
+ pThreadInfo->querySeq);
+ g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] = 0;
+ taos_unsubscribe(g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID],
+ g_queryInfo.specifiedQueryInfo.subscribeKeepProgress);
+ g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID] =
+ subscribeImpl(
+ SPECIFIED_CLASS,
+ pThreadInfo,
+ g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq],
+ g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID],
+ g_queryInfo.specifiedQueryInfo.subscribeRestart,
+ g_queryInfo.specifiedQueryInfo.subscribeInterval);
+ if (NULL == g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID]) {
+ taos_close(pThreadInfo->taos);
+ return NULL;
+ }
+ }
+ }
+ }
+ taos_free_result(g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID]);
+ taos_close(pThreadInfo->taos);
- return NULL;
+ return NULL;
}
static int subscribeTestProcess() {
- setupForAnsiEscape();
- printfQueryMeta();
- resetAfterAnsiEscape();
-
- prompt();
-
- TAOS * taos = NULL;
- taos = taos_connect(g_queryInfo.host,
- g_queryInfo.user,
- g_queryInfo.password,
- g_queryInfo.dbName,
- g_queryInfo.port);
- if (taos == NULL) {
- errorPrint( "Failed to connect to TDengine, reason:%s\n",
- taos_errstr(NULL));
- exit(-1);
- }
-
- if (0 != g_queryInfo.superQueryInfo.sqlCount) {
- getAllChildNameOfSuperTable(taos,
+ setupForAnsiEscape();
+ printfQueryMeta();
+ resetAfterAnsiEscape();
+
+ prompt();
+
+ TAOS * taos = NULL;
+ taos = taos_connect(g_queryInfo.host,
+ g_queryInfo.user,
+ g_queryInfo.password,
g_queryInfo.dbName,
- g_queryInfo.superQueryInfo.sTblName,
- &g_queryInfo.superQueryInfo.childTblName,
- &g_queryInfo.superQueryInfo.childTblCount);
- }
-
- taos_close(taos); // TODO: workaround to use separate taos connection;
-
- pthread_t *pids = NULL;
- threadInfo *infos = NULL;
-
- pthread_t *pidsOfStable = NULL;
- threadInfo *infosOfStable = NULL;
-
- //==== create threads for query for specified table
- if (g_queryInfo.specifiedQueryInfo.sqlCount <= 0) {
- debugPrint("%s() LN%d, sepcified query sqlCount %d.\n",
- __func__, __LINE__,
- g_queryInfo.specifiedQueryInfo.sqlCount);
- } else {
- if (g_queryInfo.specifiedQueryInfo.concurrent <= 0) {
- errorPrint("%s() LN%d, sepcified query sqlCount %d.\n",
- __func__, __LINE__,
- g_queryInfo.specifiedQueryInfo.sqlCount);
+ g_queryInfo.port);
+ if (taos == NULL) {
+ errorPrint( "Failed to connect to TDengine, reason:%s\n",
+ taos_errstr(NULL));
exit(-1);
}
- pids = calloc(
- 1,
- g_queryInfo.specifiedQueryInfo.sqlCount *
- g_queryInfo.specifiedQueryInfo.concurrent *
- sizeof(pthread_t));
- infos = calloc(
- 1,
- g_queryInfo.specifiedQueryInfo.sqlCount *
- g_queryInfo.specifiedQueryInfo.concurrent *
- sizeof(threadInfo));
- if ((NULL == pids) || (NULL == infos)) {
- errorPrint("%s() LN%d, malloc failed for create threads\n", __func__, __LINE__);
- exit(-1);
+ if (0 != g_queryInfo.superQueryInfo.sqlCount) {
+ getAllChildNameOfSuperTable(taos,
+ g_queryInfo.dbName,
+ g_queryInfo.superQueryInfo.sTblName,
+ &g_queryInfo.superQueryInfo.childTblName,
+ &g_queryInfo.superQueryInfo.childTblCount);
}
- for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
- for (int j = 0; j < g_queryInfo.specifiedQueryInfo.concurrent; j++) {
- uint64_t seq = i * g_queryInfo.specifiedQueryInfo.concurrent + j;
- threadInfo *pThreadInfo = infos + seq;
- pThreadInfo->threadID = seq;
- pThreadInfo->querySeq = i;
- pThreadInfo->taos = NULL; // TODO: workaround to use separate taos connection;
- pthread_create(pids + seq, NULL, specifiedSubscribe, pThreadInfo);
+ taos_close(taos); // TODO: workaround to use separate taos connection;
+
+ pthread_t *pids = NULL;
+ threadInfo *infos = NULL;
+
+ pthread_t *pidsOfStable = NULL;
+ threadInfo *infosOfStable = NULL;
+
+ //==== create threads for query for specified table
+ if (g_queryInfo.specifiedQueryInfo.sqlCount <= 0) {
+ debugPrint("%s() LN%d, sepcified query sqlCount %d.\n",
+ __func__, __LINE__,
+ g_queryInfo.specifiedQueryInfo.sqlCount);
+ } else {
+ if (g_queryInfo.specifiedQueryInfo.concurrent <= 0) {
+ errorPrint("%s() LN%d, sepcified query sqlCount %d.\n",
+ __func__, __LINE__,
+ g_queryInfo.specifiedQueryInfo.sqlCount);
+ exit(-1);
}
- }
- }
- //==== create threads for super table query
- if (g_queryInfo.superQueryInfo.sqlCount <= 0) {
- debugPrint("%s() LN%d, super table query sqlCount %d.\n",
- __func__, __LINE__,
- g_queryInfo.superQueryInfo.sqlCount);
- } else {
- if ((g_queryInfo.superQueryInfo.sqlCount > 0)
- && (g_queryInfo.superQueryInfo.threadCnt > 0)) {
- pidsOfStable = calloc(
+ pids = calloc(
1,
- g_queryInfo.superQueryInfo.sqlCount *
- g_queryInfo.superQueryInfo.threadCnt *
- sizeof(pthread_t));
- infosOfStable = calloc(
+ g_queryInfo.specifiedQueryInfo.sqlCount *
+ g_queryInfo.specifiedQueryInfo.concurrent *
+ sizeof(pthread_t));
+ infos = calloc(
1,
- g_queryInfo.superQueryInfo.sqlCount *
- g_queryInfo.superQueryInfo.threadCnt *
- sizeof(threadInfo));
- if ((NULL == pidsOfStable) || (NULL == infosOfStable)) {
- errorPrint("%s() LN%d, malloc failed for create threads\n",
- __func__, __LINE__);
- // taos_close(taos);
+ g_queryInfo.specifiedQueryInfo.sqlCount *
+ g_queryInfo.specifiedQueryInfo.concurrent *
+ sizeof(threadInfo));
+ if ((NULL == pids) || (NULL == infos)) {
+ errorPrint("%s() LN%d, malloc failed for create threads\n", __func__, __LINE__);
exit(-1);
}
- int64_t ntables = g_queryInfo.superQueryInfo.childTblCount;
- int threads = g_queryInfo.superQueryInfo.threadCnt;
-
- int64_t a = ntables / threads;
- if (a < 1) {
- threads = ntables;
- a = 1;
+ for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
+ for (int j = 0; j < g_queryInfo.specifiedQueryInfo.concurrent; j++) {
+ uint64_t seq = i * g_queryInfo.specifiedQueryInfo.concurrent + j;
+ threadInfo *pThreadInfo = infos + seq;
+ pThreadInfo->threadID = seq;
+ pThreadInfo->querySeq = i;
+ pThreadInfo->taos = NULL; // TODO: workaround to use separate taos connection;
+ pthread_create(pids + seq, NULL, specifiedSubscribe, pThreadInfo);
+ }
}
+ }
- int64_t b = 0;
- if (threads != 0) {
- b = ntables % threads;
- }
+ //==== create threads for super table query
+ if (g_queryInfo.superQueryInfo.sqlCount <= 0) {
+ debugPrint("%s() LN%d, super table query sqlCount %d.\n",
+ __func__, __LINE__,
+ g_queryInfo.superQueryInfo.sqlCount);
+ } else {
+ if ((g_queryInfo.superQueryInfo.sqlCount > 0)
+ && (g_queryInfo.superQueryInfo.threadCnt > 0)) {
+ pidsOfStable = calloc(
+ 1,
+ g_queryInfo.superQueryInfo.sqlCount *
+ g_queryInfo.superQueryInfo.threadCnt *
+ sizeof(pthread_t));
+ infosOfStable = calloc(
+ 1,
+ g_queryInfo.superQueryInfo.sqlCount *
+ g_queryInfo.superQueryInfo.threadCnt *
+ sizeof(threadInfo));
+ if ((NULL == pidsOfStable) || (NULL == infosOfStable)) {
+ errorPrint("%s() LN%d, malloc failed for create threads\n",
+ __func__, __LINE__);
+ // taos_close(taos);
+ exit(-1);
+ }
- for (uint64_t i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
- uint64_t tableFrom = 0;
- for (int j = 0; j < threads; j++) {
- uint64_t seq = i * threads + j;
- threadInfo *pThreadInfo = infosOfStable + seq;
- pThreadInfo->threadID = seq;
- pThreadInfo->querySeq = i;
+ int64_t ntables = g_queryInfo.superQueryInfo.childTblCount;
+ int threads = g_queryInfo.superQueryInfo.threadCnt;
- pThreadInfo->start_table_from = tableFrom;
- pThreadInfo->ntables = j < b ? a + 1 : a;
- pThreadInfo->end_table_to = j < b ? tableFrom + a : tableFrom + a - 1;
- tableFrom = pThreadInfo->end_table_to + 1;
- pThreadInfo->taos = NULL; // TODO: workaround to use separate taos connection;
- pthread_create(pidsOfStable + seq,
- NULL, superSubscribe, pThreadInfo);
+ int64_t a = ntables / threads;
+ if (a < 1) {
+ threads = ntables;
+ a = 1;
}
- }
- g_queryInfo.superQueryInfo.threadCnt = threads;
+ int64_t b = 0;
+ if (threads != 0) {
+ b = ntables % threads;
+ }
+
+ for (uint64_t i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
+ uint64_t tableFrom = 0;
+ for (int j = 0; j < threads; j++) {
+ uint64_t seq = i * threads + j;
+ threadInfo *pThreadInfo = infosOfStable + seq;
+ pThreadInfo->threadID = seq;
+ pThreadInfo->querySeq = i;
+
+ pThreadInfo->start_table_from = tableFrom;
+ pThreadInfo->ntables = j < b ? a + 1 : a;
+ pThreadInfo->end_table_to = j < b ? tableFrom + a : tableFrom + a - 1;
+ tableFrom = pThreadInfo->end_table_to + 1;
+ pThreadInfo->taos = NULL; // TODO: workaround to use separate taos connection;
+ pthread_create(pidsOfStable + seq,
+ NULL, superSubscribe, pThreadInfo);
+ }
+ }
+
+ g_queryInfo.superQueryInfo.threadCnt = threads;
- for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
- for (int j = 0; j < threads; j++) {
- uint64_t seq = i * threads + j;
- pthread_join(pidsOfStable[seq], NULL);
+ for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
+ for (int j = 0; j < threads; j++) {
+ uint64_t seq = i * threads + j;
+ pthread_join(pidsOfStable[seq], NULL);
+ }
}
}
}
- }
- for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
- for (int j = 0; j < g_queryInfo.specifiedQueryInfo.concurrent; j++) {
- uint64_t seq = i * g_queryInfo.specifiedQueryInfo.concurrent + j;
- pthread_join(pids[seq], NULL);
+ for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
+ for (int j = 0; j < g_queryInfo.specifiedQueryInfo.concurrent; j++) {
+ uint64_t seq = i * g_queryInfo.specifiedQueryInfo.concurrent + j;
+ pthread_join(pids[seq], NULL);
+ }
}
- }
- tmfree((char*)pids);
- tmfree((char*)infos);
+ tmfree((char*)pids);
+ tmfree((char*)infos);
- tmfree((char*)pidsOfStable);
- tmfree((char*)infosOfStable);
-// taos_close(taos);
- return 0;
+ tmfree((char*)pidsOfStable);
+ tmfree((char*)infosOfStable);
+ // taos_close(taos);
+ return 0;
}
static void initOfInsertMeta() {
- memset(&g_Dbs, 0, sizeof(SDbs));
+ memset(&g_Dbs, 0, sizeof(SDbs));
- // set default values
- tstrncpy(g_Dbs.host, "127.0.0.1", MAX_HOSTNAME_SIZE);
- g_Dbs.port = 6030;
- tstrncpy(g_Dbs.user, TSDB_DEFAULT_USER, MAX_USERNAME_SIZE);
- tstrncpy(g_Dbs.password, TSDB_DEFAULT_PASS, MAX_PASSWORD_SIZE);
- g_Dbs.threadCount = 2;
+ // set default values
+ tstrncpy(g_Dbs.host, "127.0.0.1", MAX_HOSTNAME_SIZE);
+ g_Dbs.port = 6030;
+ tstrncpy(g_Dbs.user, TSDB_DEFAULT_USER, MAX_USERNAME_SIZE);
+ tstrncpy(g_Dbs.password, TSDB_DEFAULT_PASS, MAX_PASSWORD_SIZE);
+ g_Dbs.threadCount = 2;
- g_Dbs.use_metric = g_args.use_metric;
+ g_Dbs.use_metric = g_args.use_metric;
}
static void initOfQueryMeta() {
- memset(&g_queryInfo, 0, sizeof(SQueryMetaInfo));
+ memset(&g_queryInfo, 0, sizeof(SQueryMetaInfo));
- // set default values
- tstrncpy(g_queryInfo.host, "127.0.0.1", MAX_HOSTNAME_SIZE);
- g_queryInfo.port = 6030;
- tstrncpy(g_queryInfo.user, TSDB_DEFAULT_USER, MAX_USERNAME_SIZE);
- tstrncpy(g_queryInfo.password, TSDB_DEFAULT_PASS, MAX_PASSWORD_SIZE);
+ // set default values
+ tstrncpy(g_queryInfo.host, "127.0.0.1", MAX_HOSTNAME_SIZE);
+ g_queryInfo.port = 6030;
+ tstrncpy(g_queryInfo.user, TSDB_DEFAULT_USER, MAX_USERNAME_SIZE);
+ tstrncpy(g_queryInfo.password, TSDB_DEFAULT_PASS, MAX_PASSWORD_SIZE);
}
static void setParaFromArg() {
@@ -8130,7 +8369,7 @@ static void setParaFromArg() {
tstrncpy(g_Dbs.db[0].dbName, g_args.database, TSDB_DB_NAME_LEN);
g_Dbs.db[0].dbCfg.replica = g_args.replica;
- tstrncpy(g_Dbs.db[0].dbCfg.precision, "ms", 8);
+ tstrncpy(g_Dbs.db[0].dbCfg.precision, "ms", SMALL_BUFF_LEN);
tstrncpy(g_Dbs.resultFile, g_args.output_file, MAX_FILE_NAME_LEN);
@@ -8163,8 +8402,8 @@ static void setParaFromArg() {
g_Dbs.db[0].superTbls[0].disorderRange = g_args.disorderRange;
g_Dbs.db[0].superTbls[0].disorderRatio = g_args.disorderRatio;
tstrncpy(g_Dbs.db[0].superTbls[0].childTblPrefix,
- g_args.tb_prefix, TSDB_TABLE_NAME_LEN - 20);
- tstrncpy(g_Dbs.db[0].superTbls[0].dataSource, "rand", MAX_TB_NAME_SIZE);
+ g_args.tb_prefix, TBNAME_PREFIX_LEN);
+ tstrncpy(g_Dbs.db[0].superTbls[0].dataSource, "rand", SMALL_BUFF_LEN);
if (g_args.iface == INTERFACE_BUT) {
g_Dbs.db[0].superTbls[0].iface = TAOSC_IFACE;
@@ -8173,7 +8412,7 @@ static void setParaFromArg() {
}
tstrncpy(g_Dbs.db[0].superTbls[0].startTimestamp,
"2017-07-14 10:40:00.000", MAX_TB_NAME_SIZE);
- g_Dbs.db[0].superTbls[0].timeStampStep = DEFAULT_TIMESTAMP_STEP;
+ g_Dbs.db[0].superTbls[0].timeStampStep = g_args.timestamp_step;
g_Dbs.db[0].superTbls[0].insertRows = g_args.num_of_DPT;
g_Dbs.db[0].superTbls[0].maxSqlLen = g_args.max_sql_len;
@@ -8185,7 +8424,7 @@ static void setParaFromArg() {
}
tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType,
- data_type[i], strlen(data_type[i]) + 1);
+ data_type[i], min(DATATYPE_BUFF_LEN, strlen(data_type[i]) + 1));
g_Dbs.db[0].superTbls[0].columns[i].dataLen = g_args.len_of_binary;
g_Dbs.db[0].superTbls[0].columnCount++;
}
@@ -8196,18 +8435,18 @@ static void setParaFromArg() {
for (int i = g_Dbs.db[0].superTbls[0].columnCount;
i < g_args.num_of_CPR; i++) {
tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType,
- "INT", strlen("INT") + 1);
+ "INT", min(DATATYPE_BUFF_LEN, strlen("INT") + 1));
g_Dbs.db[0].superTbls[0].columns[i].dataLen = 0;
g_Dbs.db[0].superTbls[0].columnCount++;
}
}
tstrncpy(g_Dbs.db[0].superTbls[0].tags[0].dataType,
- "INT", strlen("INT") + 1);
+ "INT", min(DATATYPE_BUFF_LEN, strlen("INT") + 1));
g_Dbs.db[0].superTbls[0].tags[0].dataLen = 0;
tstrncpy(g_Dbs.db[0].superTbls[0].tags[1].dataType,
- "BINARY", strlen("BINARY") + 1);
+ "BINARY", min(DATATYPE_BUFF_LEN, strlen("BINARY") + 1));
g_Dbs.db[0].superTbls[0].tags[1].dataLen = g_args.len_of_binary;
g_Dbs.db[0].superTbls[0].tagCount = 2;
} else {
@@ -8218,37 +8457,37 @@ static void setParaFromArg() {
/* Function to do regular expression check */
static int regexMatch(const char *s, const char *reg, int cflags) {
- regex_t regex;
- char msgbuf[100] = {0};
-
- /* Compile regular expression */
- if (regcomp(&regex, reg, cflags) != 0) {
- printf("Fail to compile regex\n");
- exit(-1);
- }
-
- /* Execute regular expression */
- int reti = regexec(&regex, s, 0, NULL, 0);
- if (!reti) {
- regfree(&regex);
- return 1;
- } else if (reti == REG_NOMATCH) {
- regfree(&regex);
+ regex_t regex;
+ char msgbuf[100] = {0};
+
+ /* Compile regular expression */
+ if (regcomp(&regex, reg, cflags) != 0) {
+ printf("Fail to compile regex\n");
+ exit(-1);
+ }
+
+ /* Execute regular expression */
+ int reti = regexec(&regex, s, 0, NULL, 0);
+ if (!reti) {
+ regfree(&regex);
+ return 1;
+ } else if (reti == REG_NOMATCH) {
+ regfree(&regex);
+ return 0;
+ } else {
+ regerror(reti, &regex, msgbuf, sizeof(msgbuf));
+ printf("Regex match failed: %s\n", msgbuf);
+ regfree(&regex);
+ exit(-1);
+ }
+
return 0;
- } else {
- regerror(reti, &regex, msgbuf, sizeof(msgbuf));
- printf("Regex match failed: %s\n", msgbuf);
- regfree(&regex);
- exit(-1);
- }
-
- return 0;
}
static int isCommentLine(char *line) {
- if (line == NULL) return 1;
+ if (line == NULL) return 1;
- return regexMatch(line, "^\\s*#.*", REG_EXTENDED);
+ return regexMatch(line, "^\\s*#.*", REG_EXTENDED);
}
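
regexMatch and isCommentLine above wrap the POSIX regcomp/regexec API. A small hedged example of the same pattern follows; note that the diff's pattern string uses \s, which is a GNU extension, so the sketch uses the portable [[:space:]] class instead.

    #include <regex.h>
    #include <stdio.h>

    /* Returns 1 if `s` is a comment line (optional whitespace then '#'), else 0. */
    static int looksLikeComment(const char *s) {
        regex_t regex;
        if (regcomp(&regex, "^[[:space:]]*#", REG_EXTENDED) != 0) {
            return 0;                  /* treat compile failure as "no match" */
        }
        int reti = regexec(&regex, s, 0, NULL, 0);
        regfree(&regex);
        return reti == 0;              /* regexec returns 0 on a match */
    }

    int main(void) {
        printf("%d\n", looksLikeComment("  # a comment"));     /* 1 */
        printf("%d\n", looksLikeComment("select * from t;"));  /* 0 */
        return 0;
    }
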
static void querySqlFile(TAOS* taos, char* sqlFile)
@@ -8306,131 +8545,131 @@ static void querySqlFile(TAOS* taos, char* sqlFile)
static void testMetaFile() {
if (INSERT_TEST == g_args.test_mode) {
- if (g_Dbs.cfgDir[0])
- taos_options(TSDB_OPTION_CONFIGDIR, g_Dbs.cfgDir);
+ if (g_Dbs.cfgDir[0])
+ taos_options(TSDB_OPTION_CONFIGDIR, g_Dbs.cfgDir);
- insertTestProcess();
+ insertTestProcess();
} else if (QUERY_TEST == g_args.test_mode) {
- if (g_queryInfo.cfgDir[0])
- taos_options(TSDB_OPTION_CONFIGDIR, g_queryInfo.cfgDir);
+ if (g_queryInfo.cfgDir[0])
+ taos_options(TSDB_OPTION_CONFIGDIR, g_queryInfo.cfgDir);
- queryTestProcess();
+ queryTestProcess();
} else if (SUBSCRIBE_TEST == g_args.test_mode) {
- if (g_queryInfo.cfgDir[0])
- taos_options(TSDB_OPTION_CONFIGDIR, g_queryInfo.cfgDir);
+ if (g_queryInfo.cfgDir[0])
+ taos_options(TSDB_OPTION_CONFIGDIR, g_queryInfo.cfgDir);
- subscribeTestProcess();
+ subscribeTestProcess();
} else {
- ;
+ ;
}
}
static void queryResult() {
- // query data
-
- pthread_t read_id;
- threadInfo *pThreadInfo = calloc(1, sizeof(threadInfo));
- assert(pThreadInfo);
- pThreadInfo->start_time = 1500000000000; // 2017-07-14 10:40:00.000
- pThreadInfo->start_table_from = 0;
-
- //pThreadInfo->do_aggreFunc = g_Dbs.do_aggreFunc;
- if (g_args.use_metric) {
- pThreadInfo->ntables = g_Dbs.db[0].superTbls[0].childTblCount;
- pThreadInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1;
- pThreadInfo->superTblInfo = &g_Dbs.db[0].superTbls[0];
- tstrncpy(pThreadInfo->tb_prefix,
- g_Dbs.db[0].superTbls[0].childTblPrefix, TSDB_TABLE_NAME_LEN - 20);
- } else {
- pThreadInfo->ntables = g_args.num_of_tables;
- pThreadInfo->end_table_to = g_args.num_of_tables -1;
- tstrncpy(pThreadInfo->tb_prefix, g_args.tb_prefix, TSDB_TABLE_NAME_LEN);
- }
-
- pThreadInfo->taos = taos_connect(
- g_Dbs.host,
- g_Dbs.user,
- g_Dbs.password,
- g_Dbs.db[0].dbName,
- g_Dbs.port);
- if (pThreadInfo->taos == NULL) {
- errorPrint( "Failed to connect to TDengine, reason:%s\n",
- taos_errstr(NULL));
+ // query data
+
+ pthread_t read_id;
+ threadInfo *pThreadInfo = calloc(1, sizeof(threadInfo));
+ assert(pThreadInfo);
+ pThreadInfo->start_time = 1500000000000; // 2017-07-14 10:40:00.000
+ pThreadInfo->start_table_from = 0;
+
+ //pThreadInfo->do_aggreFunc = g_Dbs.do_aggreFunc;
+ if (g_args.use_metric) {
+ pThreadInfo->ntables = g_Dbs.db[0].superTbls[0].childTblCount;
+ pThreadInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1;
+ pThreadInfo->superTblInfo = &g_Dbs.db[0].superTbls[0];
+ tstrncpy(pThreadInfo->tb_prefix,
+ g_Dbs.db[0].superTbls[0].childTblPrefix, TBNAME_PREFIX_LEN);
+ } else {
+ pThreadInfo->ntables = g_args.num_of_tables;
+ pThreadInfo->end_table_to = g_args.num_of_tables -1;
+ tstrncpy(pThreadInfo->tb_prefix, g_args.tb_prefix, TSDB_TABLE_NAME_LEN);
+ }
+
+ pThreadInfo->taos = taos_connect(
+ g_Dbs.host,
+ g_Dbs.user,
+ g_Dbs.password,
+ g_Dbs.db[0].dbName,
+ g_Dbs.port);
+ if (pThreadInfo->taos == NULL) {
+ errorPrint( "Failed to connect to TDengine, reason:%s\n",
+ taos_errstr(NULL));
+ free(pThreadInfo);
+ exit(-1);
+ }
+
+ tstrncpy(pThreadInfo->filePath, g_Dbs.resultFile, MAX_FILE_NAME_LEN);
+
+ if (!g_Dbs.use_metric) {
+ pthread_create(&read_id, NULL, readTable, pThreadInfo);
+ } else {
+ pthread_create(&read_id, NULL, readMetric, pThreadInfo);
+ }
+ pthread_join(read_id, NULL);
+ taos_close(pThreadInfo->taos);
free(pThreadInfo);
- exit(-1);
- }
-
- tstrncpy(pThreadInfo->filePath, g_Dbs.resultFile, MAX_FILE_NAME_LEN);
-
- if (!g_Dbs.use_metric) {
- pthread_create(&read_id, NULL, readTable, pThreadInfo);
- } else {
- pthread_create(&read_id, NULL, readMetric, pThreadInfo);
- }
- pthread_join(read_id, NULL);
- taos_close(pThreadInfo->taos);
- free(pThreadInfo);
}
static void testCmdLine() {
- if (strlen(configDir)) {
- wordexp_t full_path;
- if (wordexp(configDir, &full_path, 0) != 0) {
- errorPrint( "Invalid path %s\n", configDir);
- return;
+ if (strlen(configDir)) {
+ wordexp_t full_path;
+ if (wordexp(configDir, &full_path, 0) != 0) {
+ errorPrint( "Invalid path %s\n", configDir);
+ return;
+ }
+ taos_options(TSDB_OPTION_CONFIGDIR, full_path.we_wordv[0]);
+ wordfree(&full_path);
}
- taos_options(TSDB_OPTION_CONFIGDIR, full_path.we_wordv[0]);
- wordfree(&full_path);
- }
- g_args.test_mode = INSERT_TEST;
- insertTestProcess();
+ g_args.test_mode = INSERT_TEST;
+ insertTestProcess();
- if (false == g_Dbs.insert_only)
- queryResult();
+ if (false == g_Dbs.insert_only)
+ queryResult();
}
int main(int argc, char *argv[]) {
- parse_args(argc, argv, &g_args);
+ parse_args(argc, argv, &g_args);
- debugPrint("meta file: %s\n", g_args.metaFile);
+ debugPrint("meta file: %s\n", g_args.metaFile);
- if (g_args.metaFile) {
- initOfInsertMeta();
- initOfQueryMeta();
+ if (g_args.metaFile) {
+ initOfInsertMeta();
+ initOfQueryMeta();
- if (false == getInfoFromJsonFile(g_args.metaFile)) {
- printf("Failed to read %s\n", g_args.metaFile);
- return 1;
- }
-
- testMetaFile();
- } else {
- memset(&g_Dbs, 0, sizeof(SDbs));
- setParaFromArg();
-
- if (NULL != g_args.sqlFile) {
- TAOS* qtaos = taos_connect(
- g_Dbs.host,
- g_Dbs.user,
- g_Dbs.password,
- g_Dbs.db[0].dbName,
- g_Dbs.port);
- querySqlFile(qtaos, g_args.sqlFile);
- taos_close(qtaos);
+ if (false == getInfoFromJsonFile(g_args.metaFile)) {
+ printf("Failed to read %s\n", g_args.metaFile);
+ return 1;
+ }
+ testMetaFile();
} else {
- testCmdLine();
- }
+ memset(&g_Dbs, 0, sizeof(SDbs));
+ setParaFromArg();
+
+ if (NULL != g_args.sqlFile) {
+ TAOS* qtaos = taos_connect(
+ g_Dbs.host,
+ g_Dbs.user,
+ g_Dbs.password,
+ g_Dbs.db[0].dbName,
+ g_Dbs.port);
+ querySqlFile(qtaos, g_args.sqlFile);
+ taos_close(qtaos);
- if (g_dupstr)
- free(g_dupstr);
- }
+ } else {
+ testCmdLine();
+ }
- return 0;
+ if (g_dupstr)
+ free(g_dupstr);
+ }
+
+ return 0;
}
diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c
index e5501b4366f24fdee9ace153e59b4a60dc67455f..dc36dbf6714b0f3de31e40dfc1bcebaeb2c61223 100644
--- a/src/kit/taosdump/taosdump.c
+++ b/src/kit/taosdump/taosdump.c
@@ -60,7 +60,7 @@ typedef struct {
fprintf(stderr, "VERB: "fmt, __VA_ARGS__); } while(0)
#define errorPrint(fmt, ...) \
- do { fprintf(stderr, "ERROR: "fmt, __VA_ARGS__); } while(0)
+ do { fprintf(stderr, "\033[31m"); fprintf(stderr, "ERROR: "fmt, __VA_ARGS__); fprintf(stderr, "\033[0m"); } while(0)
// -------------------------- SHOW DATABASE INTERFACE-----------------------
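
The reworked errorPrint above wraps the message in ANSI colour escapes: ESC[31m switches stderr output to red and ESC[0m resets it. A standalone sketch of the same macro so the effect can be tried in isolation; the sample message is borrowed from the taosCheckParam change later in this file's diff.

    #include <stdio.h>

    /* Print to stderr in red and reset the colour afterwards. */
    #define errorPrint(fmt, ...) \
        do { \
            fprintf(stderr, "\033[31m"); \
            fprintf(stderr, "ERROR: " fmt, __VA_ARGS__); \
            fprintf(stderr, "\033[0m"); \
        } while (0)

    int main(void) {
        errorPrint("%s", "taosdump requires parameters for database and operation\n");
        return 0;
    }
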
@@ -234,9 +234,9 @@ static struct argp_option options[] = {
{"start-time", 'S', "START_TIME", 0, "Start time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 4},
{"end-time", 'E', "END_TIME", 0, "End time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 5},
#if TSDB_SUPPORT_NANOSECOND == 1
- {"precision", 'C', "PRECISION", 0, "Epoch precision. Valid value is one of ms, us, and ns. Default is ms.", 6},
+ {"precision", 'C', "PRECISION", 0, "Specify precision for converting human-readable time to epoch. Valid value is one of ms, us, and ns. Default is ms.", 6},
#else
- {"precision", 'C', "PRECISION", 0, "Epoch precision. Valid value is one of ms and us. Default is ms.", 6},
+ {"precision", 'C', "PRECISION", 0, "Use specified precision to convert human-readable time. Valid value is one of ms and us. Default is ms.", 6},
#endif
{"data-batch", 'B', "DATA_BATCH", 0, "Number of data point per insert statement. Max value is 32766. Default is 1.", 3},
{"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 3},
@@ -453,6 +453,8 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
case 'E':
g_args.end_time = atol(arg);
break;
+ case 'C':
+ break;
case 'B':
g_args.data_batch = atoi(arg);
if (g_args.data_batch > MAX_RECORDS_PER_REQ) {
@@ -545,8 +547,8 @@ static void parse_precision_first(
free(tmp);
exit(-1);
}
- strncpy(g_args.precision, tmp,
- min(DB_PRECISION_LEN - 1, strlen(tmp)));
+ tstrncpy(g_args.precision, tmp,
+ min(DB_PRECISION_LEN, strlen(tmp) + 1));
free(tmp);
}
}
@@ -597,10 +599,11 @@ static void parse_timestamp(
return;
}
} else {
+ tstrncpy(arguments->precision, "n/a", strlen("n/a") + 1);
tmpEpoch = atoll(tmp);
}
- sprintf(argv[i], "%"PRId64"", tmpEpoch);
+ sprintf(argv[i+1], "%"PRId64"", tmpEpoch);
debugPrint("%s() LN%d, tmp is: %s, argv[%d]: %s\n",
__func__, __LINE__, tmp, i, argv[i]);
free(tmp);
@@ -792,12 +795,14 @@ static int taosGetTableRecordInfo(
while ((row = taos_fetch_row(result)) != NULL) {
isSet = true;
pTableRecordInfo->isMetric = false;
- strncpy(pTableRecordInfo->tableRecord.name,
+ tstrncpy(pTableRecordInfo->tableRecord.name,
(char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
- fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
- strncpy(pTableRecordInfo->tableRecord.metric,
+ min(TSDB_TABLE_NAME_LEN,
+ fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes) + 1);
+ tstrncpy(pTableRecordInfo->tableRecord.metric,
(char *)row[TSDB_SHOW_TABLES_METRIC_INDEX],
- fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes);
+ min(TSDB_TABLE_NAME_LEN,
+ fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes) + 1);
break;
}
@@ -1078,8 +1083,8 @@ _dump_db_point:
goto _exit_failure;
}
- strncpy(g_dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX],
- fields[TSDB_SHOW_DB_NAME_INDEX].bytes);
+ tstrncpy(g_dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX],
+ min(TSDB_DB_NAME_LEN, fields[TSDB_SHOW_DB_NAME_INDEX].bytes) + 1);
if (g_args.with_property) {
g_dbInfos[count]->ntables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]);
g_dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]);
@@ -1087,8 +1092,8 @@ _dump_db_point:
g_dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]);
g_dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]);
- strncpy(g_dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX],
- fields[TSDB_SHOW_DB_KEEP_INDEX].bytes);
+ tstrncpy(g_dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX],
+ min(32, fields[TSDB_SHOW_DB_KEEP_INDEX].bytes) + 1);
//g_dbInfos[count]->daysToKeep = *((int16_t *)row[TSDB_SHOW_DB_KEEP_INDEX]);
//g_dbInfos[count]->daysToKeep1;
//g_dbInfos[count]->daysToKeep2;
@@ -1101,8 +1106,8 @@ _dump_db_point:
g_dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX]));
g_dbInfos[count]->cachelast = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX]));
- strncpy(g_dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX],
- fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes);
+ tstrncpy(g_dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX],
+ min(8, fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes) + 1);
//g_dbInfos[count]->precision = *((int8_t *)row[TSDB_SHOW_DB_PRECISION_INDEX]);
g_dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]);
}
@@ -1253,17 +1258,19 @@ static int taosGetTableDes(
tstrncpy(tableDes->name, table, TSDB_TABLE_NAME_LEN);
while ((row = taos_fetch_row(res)) != NULL) {
- strncpy(tableDes->cols[count].field,
+ tstrncpy(tableDes->cols[count].field,
(char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX],
- fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes);
- strncpy(tableDes->cols[count].type,
+ min(TSDB_COL_NAME_LEN + 1,
+ fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes + 1));
+ tstrncpy(tableDes->cols[count].type,
(char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
- min(15, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes));
+ min(16, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes + 1));
tableDes->cols[count].length =
*((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
- strncpy(tableDes->cols[count].note,
+ tstrncpy(tableDes->cols[count].note,
(char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX],
- fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes);
+ min(COL_NOTE_LEN,
+ fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes + 1));
count++;
}
@@ -1698,8 +1705,9 @@ static int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE
while ((row = taos_fetch_row(res)) != NULL) {
memset(&tableRecord, 0, sizeof(STableRecord));
- strncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
- fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
+ tstrncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
+ min(TSDB_TABLE_NAME_LEN,
+ fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes) + 1);
taosWrite(fd, &tableRecord, sizeof(STableRecord));
}
@@ -1773,9 +1781,11 @@ static int taosDumpDb(SDbInfo *dbInfo, FILE *fp, TAOS *taosCon) {
while ((row = taos_fetch_row(res)) != NULL) {
memset(&tableRecord, 0, sizeof(STableRecord));
tstrncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
- fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
+ min(TSDB_TABLE_NAME_LEN,
+ fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes) + 1);
tstrncpy(tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX],
- min(TSDB_TABLE_NAME_LEN, fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes));
+ min(TSDB_TABLE_NAME_LEN,
+ fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes) + 1);
taosWrite(fd, &tableRecord, sizeof(STableRecord));
@@ -2163,7 +2173,7 @@ static int taosCheckParam(struct arguments *arguments) {
if (g_args.arg_list_len == 0) {
if ((!g_args.all_databases) && (!g_args.isDumpIn)) {
- fprintf(stderr, "taosdump requires parameters\n");
+ errorPrint("%s", "taosdump requires parameters for database and operation\n");
return -1;
}
}
diff --git a/src/mnode/src/mnodeDnode.c b/src/mnode/src/mnodeDnode.c
index 5a0743dd1b2ad97350c8b8f2ce57d91254d60eaf..02cf1c782c54f61e9bf113d7319e1009e4e7d946 100644
--- a/src/mnode/src/mnodeDnode.c
+++ b/src/mnode/src/mnodeDnode.c
@@ -1146,6 +1146,7 @@ static int32_t mnodeRetrieveConfigs(SShowObj *pShow, char *data, int32_t rows, v
numOfRows++;
break;
case TAOS_CFG_VTYPE_FLOAT:
+ case TAOS_CFG_VTYPE_DOUBLE:
t = snprintf(varDataVal(pWrite), TSDB_CFG_VALUE_LEN, "%f", *((float *)cfg->ptr));
varDataSetLen(pWrite, t);
numOfRows++;
diff --git a/src/mnode/src/mnodeProfile.c b/src/mnode/src/mnodeProfile.c
index cbf713af6514ce305d91e7edc6710ba0d51eeeec..2c117310b38632305463438e34d79dc1c439388d 100644
--- a/src/mnode/src/mnodeProfile.c
+++ b/src/mnode/src/mnodeProfile.c
@@ -32,6 +32,7 @@
#define CONN_KEEP_TIME (tsShellActivityTimer * 3)
#define CONN_CHECK_TIME (tsShellActivityTimer * 2)
#define QUERY_ID_SIZE 20
+#define QUERY_OBJ_ID_SIZE 10
#define QUERY_STREAM_SAVE_SIZE 20
static SCacheObj *tsMnodeConnCache = NULL;
@@ -361,6 +362,30 @@ static int32_t mnodeGetQueryMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pC
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
+ pShow->bytes[cols] = QUERY_OBJ_ID_SIZE + VARSTR_HEADER_SIZE;
+ pSchema[cols].type = TSDB_DATA_TYPE_BINARY;
+ strcpy(pSchema[cols].name, "sql_obj_id");
+ pSchema[cols].bytes = htons(pShow->bytes[cols]);
+ cols++;
+
+ pShow->bytes[cols] = 4;
+ pSchema[cols].type = TSDB_DATA_TYPE_INT;
+ strcpy(pSchema[cols].name, "pid");
+ pSchema[cols].bytes = htons(pShow->bytes[cols]);
+ cols++;
+
+ pShow->bytes[cols] = TSDB_EP_LEN + VARSTR_HEADER_SIZE;
+ pSchema[cols].type = TSDB_DATA_TYPE_BINARY;
+ strcpy(pSchema[cols].name, "ep");
+ pSchema[cols].bytes = htons(pShow->bytes[cols]);
+ cols++;
+
+ pShow->bytes[cols] = 4;
+ pSchema[cols].type = TSDB_DATA_TYPE_INT;
+ strcpy(pSchema[cols].name, "sub_queries");
+ pSchema[cols].bytes = htons(pShow->bytes[cols]);
+ cols++;
+
pShow->bytes[cols] = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE;
pSchema[cols].type = TSDB_DATA_TYPE_BINARY;
strcpy(pSchema[cols].name, "sql");
@@ -434,6 +459,29 @@ static int32_t mnodeRetrieveQueries(SShowObj *pShow, char *data, int32_t rows, v
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(int64_t *)pWrite = htobe64(pDesc->useconds);
cols++;
+ /*
+ pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
+ *(int64_t *)pWrite = htobe64(pDesc->sqlObjId);
+ cols++;
+ */
+ snprintf(str, tListLen(str), "0x%08" PRIx64, htobe64(pDesc->sqlObjId));
+ pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
+ STR_WITH_MAXSIZE_TO_VARSTR(pWrite, str, pShow->bytes[cols]);
+ cols++;
+
+ pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
+ *(int32_t *)pWrite = htonl(pDesc->pid);
+ cols++;
+
+ char epBuf[TSDB_EP_LEN + 1] = {0};
+ snprintf(epBuf, tListLen(epBuf), "%s:%u", pDesc->fqdn, pConnObj->port);
+ pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
+ STR_WITH_MAXSIZE_TO_VARSTR(pWrite, epBuf, pShow->bytes[cols]);
+ cols++;
+
+ pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
+ *(int32_t *)pWrite = htonl(pDesc->numOfSub);
+ cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pDesc->sql, pShow->bytes[cols]);
diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c
index 6e5cf14b965d166e381dd1d535668580e4041d9d..0bc114ffdfe8d59f4941536b56bd95be96a03d0b 100644
--- a/src/mnode/src/mnodeTable.c
+++ b/src/mnode/src/mnodeTable.c
@@ -1812,12 +1812,8 @@ static int32_t getVgroupInfoLength(SSTableVgroupMsg* pInfo, int32_t numOfTable)
}
static char* serializeVgroupInfo(SSTableObj *pTable, char* name, char* msg, SMnodeMsg* pMsgBody, void* handle) {
- SName sn = {0};
- tNameFromString(&sn, name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
- const char* tableName = tNameGetTableName(&sn);
-
- strncpy(msg, tableName, TSDB_TABLE_NAME_LEN);
- msg += TSDB_TABLE_NAME_LEN;
+ strncpy(msg, name, TSDB_TABLE_FNAME_LEN);
+ msg += TSDB_TABLE_FNAME_LEN;
if (pTable->vgHash == NULL) {
mDebug("msg:%p, app:%p stable:%s, no vgroup exist while get stable vgroup info", pMsgBody, handle, name);
diff --git a/src/os/src/detail/osTimer.c b/src/os/src/detail/osTimer.c
index c381b3e825f508b17ec0d6a053a574957c5dc365..618df8a8bad451984fafd022a33a799986a48422 100644
--- a/src/os/src/detail/osTimer.c
+++ b/src/os/src/detail/osTimer.c
@@ -38,7 +38,7 @@ static void *taosProcessAlarmSignal(void *tharg) {
struct sigevent sevent = {{0}};
- setThreadName("alarmSignal");
+ setThreadName("tmr");
#ifdef _ALPINE
sevent.sigev_notify = SIGEV_THREAD;
diff --git a/src/plugins/http/src/httpGcJson.c b/src/plugins/http/src/httpGcJson.c
index f33a994465a94bad5d79df8af73ff4fd9d640516..2d361d37940a93c3627ef53883a342d12183e6a1 100644
--- a/src/plugins/http/src/httpGcJson.c
+++ b/src/plugins/http/src/httpGcJson.c
@@ -199,7 +199,7 @@ bool gcBuildQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result,
for (int32_t i = dataFields; i >= 0; i--) {
httpJsonItemToken(jsonBuf);
- if (row[i] == NULL) {
+ if (row == NULL || i >= num_fields || row[i] == NULL) {
httpJsonOriginString(jsonBuf, "null", 4);
continue;
}
diff --git a/src/plugins/http/src/httpJson.c b/src/plugins/http/src/httpJson.c
index 10300e93670b5e10f56259d51b6ca31df3e90e39..3c72b795eef69186ef4e6308937678589224c60d 100644
--- a/src/plugins/http/src/httpJson.c
+++ b/src/plugins/http/src/httpJson.c
@@ -264,12 +264,11 @@ void httpJsonUInt64(JsonBuf* buf, uint64_t num) {
void httpJsonTimestamp(JsonBuf* buf, int64_t t, int32_t timePrecision) {
char ts[35] = {0};
- struct tm* ptm;
-
+
int32_t fractionLen;
char* format = NULL;
time_t quot = 0;
- long mod = 0;
+ int64_t mod = 0;
switch (timePrecision) {
case TSDB_TIME_PRECISION_MILLI: {
@@ -301,8 +300,9 @@ void httpJsonTimestamp(JsonBuf* buf, int64_t t, int32_t timePrecision) {
assert(false);
}
- ptm = localtime(&quot);
- int32_t length = (int32_t)strftime(ts, 35, "%Y-%m-%d %H:%M:%S", ptm);
+ struct tm ptm = {0};
+ localtime_r(&quot, &ptm);
+ int32_t length = (int32_t)strftime(ts, 35, "%Y-%m-%d %H:%M:%S", &ptm);
length += snprintf(ts + length, fractionLen, format, mod);
httpJsonString(buf, ts, length);
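
The httpJson.c change swaps the non-reentrant localtime() for localtime_r(), which writes into a caller-provided struct tm and is therefore safe when several HTTP worker threads format timestamps concurrently. A minimal hedged sketch of the same formatting path (millisecond precision only, buffer sizes illustrative):

    #include <stdio.h>
    #include <time.h>
    #include <inttypes.h>

    /* Format a millisecond epoch as "YYYY-MM-DD HH:MM:SS.mmm". */
    static void formatMsTimestamp(int64_t t, char *out, size_t outLen) {
        time_t quot = t / 1000;            /* whole seconds */
        int64_t mod = t % 1000;            /* millisecond part */
        struct tm tmBuf = {0};
        localtime_r(&quot, &tmBuf);        /* reentrant: fills tmBuf, no shared state */
        size_t len = strftime(out, outLen, "%Y-%m-%d %H:%M:%S", &tmBuf);
        snprintf(out + len, outLen - len, ".%03" PRId64, mod);
    }

    int main(void) {
        char ts[35];
        formatMsTimestamp(1626255600123LL, ts, sizeof(ts));
        printf("%s\n", ts);
        return 0;
    }
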
diff --git a/src/plugins/monitor/src/monMain.c b/src/plugins/monitor/src/monMain.c
index 960a097f5d99bb430f888d01960a879e80456d31..6e583fe0dfd809bac8c0aabf56e48bb33bd910ce 100644
--- a/src/plugins/monitor/src/monMain.c
+++ b/src/plugins/monitor/src/monMain.c
@@ -114,7 +114,7 @@ int32_t monStartSystem() {
static void *monThreadFunc(void *param) {
monDebug("starting to initialize monitor module ...");
- setThreadName("monThrd");
+ setThreadName("monitor");
while (1) {
static int32_t accessTimes = 0;
diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h
index 4581ba258d158c6cd929358a8ca15b1926397747..ce70a9ba4ab4d2dee8cab97142cac96a9a41e9cf 100644
--- a/src/query/inc/qExecutor.h
+++ b/src/query/inc/qExecutor.h
@@ -216,7 +216,7 @@ typedef struct SQueryAttr {
bool simpleAgg;
bool pointInterpQuery; // point interpolation query
bool needReverseScan; // need reverse scan
- bool distinctTag; // distinct tag query
+ bool distinct; // distinct query or not
bool stateWindow; // window State on sub/normal table
bool createFilterOperator; // if filter operator is needed
int32_t interBufSize; // intermediate buffer sizse
@@ -514,6 +514,7 @@ typedef struct SDistinctOperatorInfo {
bool recordNullVal; //has already record the null value, no need to try again
int64_t threshold;
int64_t outputCapacity;
+ int32_t colIndex;
} SDistinctOperatorInfo;
struct SGlobalMerger;
diff --git a/src/query/inc/qSqlparser.h b/src/query/inc/qSqlparser.h
index f2ab2182a1c00e2d91cdf2199ff8cc8c42bfce2f..531ff06565dba837c696c6069d409ccf536cbe8c 100644
--- a/src/query/inc/qSqlparser.h
+++ b/src/query/inc/qSqlparser.h
@@ -254,7 +254,7 @@ typedef struct tSqlExpr {
struct SArray *paramList; // function parameters list
} Expr;
- uint32_t functionId; // function id, todo remove it
+ int32_t functionId; // function id, todo remove it
SStrToken columnName; // table column info
tVariant value; // the use input value
SStrToken exprToken; // original sql expr string
diff --git a/src/query/inc/qTableMeta.h b/src/query/inc/qTableMeta.h
index 56eea6429f2406d07d532b6a9ef4910dfcc49413..4bb5483a10dc8f1b0904c17b5b3c81973115470d 100644
--- a/src/query/inc/qTableMeta.h
+++ b/src/query/inc/qTableMeta.h
@@ -60,7 +60,7 @@ typedef struct STableComInfo {
typedef struct STableMeta {
int32_t vgId;
STableId id;
- uint8_t tableType;
+ int8_t tableType;
char sTableName[TSDB_TABLE_FNAME_LEN]; // super table name
uint64_t suid; // super table id
int16_t sversion;
@@ -121,7 +121,8 @@ typedef struct SQueryInfo {
int64_t vgroupLimit; // table limit in case of super table projection query + global order + limit
int32_t udColumnId; // current user-defined constant output field column id, monotonically decreases from TSDB_UD_COLUMN_INDEX
- bool distinctTag; // distinct tag or not
+ bool distinct; // distinct query or not
+ bool onlyHasTagCond;
int32_t round; // 0/1/....
int32_t bufLen;
char* buf;
diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c
index d96b260b13cf8a1bbcbc9b329a51bc1a714aba8d..dad05df22a5d85ec2e00911c9060fc19d6e0cb42 100644
--- a/src/query/src/qAggMain.c
+++ b/src/query/src/qAggMain.c
@@ -512,6 +512,28 @@ int32_t countRequired(SQLFunctionCtx *pCtx, STimeWindow* w, int32_t colId) {
int32_t noDataRequired(SQLFunctionCtx *pCtx, STimeWindow* w, int32_t colId) {
return BLK_DATA_NO_NEEDED;
}
+#define LIST_ADD_N_DOUBLE_FLOAT(x, ctx, p, t, numOfElem, tsdbType) \
+ do { \
+ t *d = (t *)(p); \
+ for (int32_t i = 0; i < (ctx)->size; ++i) { \
+ if (((ctx)->hasNull) && isNull((char *)&(d)[i], tsdbType)) { \
+ continue; \
+ }; \
+ SET_DOUBLE_VAL(&(x) , GET_DOUBLE_VAL(&(x)) + GET_FLOAT_VAL(&(d)[i])); \
+ (numOfElem)++; \
+ } \
+ } while(0)
+#define LIST_ADD_N_DOUBLE(x, ctx, p, t, numOfElem, tsdbType) \
+ do { \
+ t *d = (t *)(p); \
+ for (int32_t i = 0; i < (ctx)->size; ++i) { \
+ if (((ctx)->hasNull) && isNull((char *)&(d)[i], tsdbType)) { \
+ continue; \
+ }; \
+ SET_DOUBLE_VAL(&(x) , (x) + (d)[i]); \
+ (numOfElem)++; \
+ } \
+ } while(0)
#define LIST_ADD_N(x, ctx, p, t, numOfElem, tsdbType) \
do { \
@@ -575,7 +597,7 @@ static void do_sum(SQLFunctionCtx *pCtx) {
*retVal += (uint64_t)pCtx->preAggVals.statis.sum;
} else if (IS_FLOAT_TYPE(pCtx->inputType)) {
double *retVal = (double*) pCtx->pOutput;
- *retVal += GET_DOUBLE_VAL((const char*)&(pCtx->preAggVals.statis.sum));
+ SET_DOUBLE_VAL(retVal, *retVal + GET_DOUBLE_VAL((const char*)&(pCtx->preAggVals.statis.sum)));
}
} else { // computing based on the true data block
void *pData = GET_INPUT_DATA_LIST(pCtx);
@@ -607,10 +629,10 @@ static void do_sum(SQLFunctionCtx *pCtx) {
}
} else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) {
double *retVal = (double *)pCtx->pOutput;
- LIST_ADD_N(*retVal, pCtx, pData, double, notNullElems, pCtx->inputType);
+ LIST_ADD_N_DOUBLE(*retVal, pCtx, pData, double, notNullElems, pCtx->inputType);
} else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) {
double *retVal = (double *)pCtx->pOutput;
- LIST_ADD_N(*retVal, pCtx, pData, float, notNullElems, pCtx->inputType);
+ LIST_ADD_N_DOUBLE_FLOAT(*retVal, pCtx, pData, float, notNullElems, pCtx->inputType);
}
}
@@ -654,7 +676,7 @@ static void sum_func_merge(SQLFunctionCtx *pCtx) {
} else if (IS_UNSIGNED_NUMERIC_TYPE(type)) {
*(uint64_t *) pCtx->pOutput += pInput->usum;
} else {
- *(double *)pCtx->pOutput += pInput->dsum;
+ SET_DOUBLE_VAL((double *)pCtx->pOutput, *(double *)pCtx->pOutput + pInput->dsum);
}
}
@@ -778,9 +800,9 @@ static void avg_function(SQLFunctionCtx *pCtx) {
} else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT) {
LIST_ADD_N(*pVal, pCtx, pData, int64_t, notNullElems, pCtx->inputType);
} else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) {
- LIST_ADD_N(*pVal, pCtx, pData, double, notNullElems, pCtx->inputType);
+ LIST_ADD_N_DOUBLE(*pVal, pCtx, pData, double, notNullElems, pCtx->inputType);
} else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) {
- LIST_ADD_N(*pVal, pCtx, pData, float, notNullElems, pCtx->inputType);
+ LIST_ADD_N_DOUBLE_FLOAT(*pVal, pCtx, pData, float, notNullElems, pCtx->inputType);
} else if (pCtx->inputType == TSDB_DATA_TYPE_UTINYINT) {
LIST_ADD_N(*pVal, pCtx, pData, uint8_t, notNullElems, pCtx->inputType);
} else if (pCtx->inputType == TSDB_DATA_TYPE_USMALLINT) {
@@ -821,7 +843,7 @@ static void avg_func_merge(SQLFunctionCtx *pCtx) {
continue;
}
- *sum += pInput->sum;
+ SET_DOUBLE_VAL(sum, *sum + pInput->sum);
// keep the number of data into the temp buffer
*(int64_t *)GET_ROWCELL_INTERBUF(pResInfo) += pInput->num;
@@ -841,8 +863,8 @@ static void avg_finalizer(SQLFunctionCtx *pCtx) {
setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes);
return;
}
-
- *(double *)pCtx->pOutput = (*(double *)pCtx->pOutput) / *(int64_t *)GET_ROWCELL_INTERBUF(pResInfo);
+
+ SET_DOUBLE_VAL((double *)pCtx->pOutput,(*(double *)pCtx->pOutput) / *(int64_t *)GET_ROWCELL_INTERBUF(pResInfo));
} else { // this is the secondary merge, only in the secondary merge, the input type is TSDB_DATA_TYPE_BINARY
assert(IS_NUMERIC_TYPE(pCtx->inputType));
SAvgInfo *pAvgInfo = (SAvgInfo *)GET_ROWCELL_INTERBUF(pResInfo);
@@ -852,7 +874,7 @@ static void avg_finalizer(SQLFunctionCtx *pCtx) {
return;
}
- *(double *)pCtx->pOutput = pAvgInfo->sum / pAvgInfo->num;
+ SET_DOUBLE_VAL((double *)pCtx->pOutput, pAvgInfo->sum / pAvgInfo->num);
}
// cannot set the numOfIteratedElems again since it is set during previous iteration
@@ -1049,7 +1071,7 @@ static bool min_func_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResultInfo
*((float *)pCtx->pOutput) = FLT_MAX;
break;
case TSDB_DATA_TYPE_DOUBLE:
- *((double *)pCtx->pOutput) = DBL_MAX;
+ SET_DOUBLE_VAL(((double *)pCtx->pOutput), DBL_MAX);
break;
default:
qError("illegal data type:%d in min/max query", pCtx->inputType);
@@ -1076,7 +1098,7 @@ static bool max_func_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResultInfo
*((float *)pCtx->pOutput) = -FLT_MAX;
break;
case TSDB_DATA_TYPE_DOUBLE:
- *((double *)pCtx->pOutput) = -DBL_MAX;
+ SET_DOUBLE_VAL(((double *)pCtx->pOutput), -DBL_MAX);
break;
case TSDB_DATA_TYPE_BIGINT:
*((int64_t *)pCtx->pOutput) = INT64_MIN;
@@ -1322,7 +1344,7 @@ static void stddev_finalizer(SQLFunctionCtx *pCtx) {
setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes);
} else {
double *retValue = (double *)pCtx->pOutput;
- *retValue = sqrt(pStd->res / pStd->num);
+ SET_DOUBLE_VAL(retValue, sqrt(pStd->res / pStd->num));
SET_VAL(pCtx, 1, 1);
}
@@ -1455,7 +1477,7 @@ static void stddev_dst_finalizer(SQLFunctionCtx *pCtx) {
setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes);
} else {
double *retValue = (double *)pCtx->pOutput;
- *retValue = sqrt(pStd->res / pStd->num);
+ SET_DOUBLE_VAL(retValue, sqrt(pStd->res / pStd->num));
SET_VAL(pCtx, 1, 1);
}
@@ -1947,7 +1969,7 @@ static void copyTopBotRes(SQLFunctionCtx *pCtx, int32_t type) {
case TSDB_DATA_TYPE_DOUBLE: {
double *output = (double *)pCtx->pOutput;
for (int32_t i = 0; i < len; ++i, output += step) {
- *output = tvp[i]->v.dKey;
+ SET_DOUBLE_VAL(output, tvp[i]->v.dKey);
}
break;
}
@@ -2366,7 +2388,7 @@ static void percentile_finalizer(SQLFunctionCtx *pCtx) {
assert(ppInfo->numOfElems == 0);
setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes);
} else {
- *(double *)pCtx->pOutput = getPercentile(pMemBucket, v);
+ SET_DOUBLE_VAL((double *)pCtx->pOutput, getPercentile(pMemBucket, v));
}
tMemBucketDestroy(pMemBucket);
@@ -2782,7 +2804,7 @@ static void deriv_function(SQLFunctionCtx *pCtx) {
if (!pDerivInfo->valueSet) { // initial value is not set yet
pDerivInfo->valueSet = true;
} else {
- *pOutput = ((pData[i] - pDerivInfo->prevValue) * pDerivInfo->tsWindow) / (tsList[i] - pDerivInfo->prevTs);
+ SET_DOUBLE_VAL(pOutput, ((pData[i] - pDerivInfo->prevValue) * pDerivInfo->tsWindow) / (tsList[i] - pDerivInfo->prevTs));
if (pDerivInfo->ignoreNegative && *pOutput < 0) {
} else {
*pTimestamp = tsList[i];
@@ -3017,7 +3039,7 @@ static void diff_function(SQLFunctionCtx *pCtx) {
}
if (pCtx->param[1].nType != INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet
- *pOutput = pData[i] - pCtx->param[1].dKey; // direct previous may be null
+ SET_DOUBLE_VAL(pOutput, pData[i] - pCtx->param[1].dKey); // direct previous may be null
*pTimestamp = (tsList != NULL)? tsList[i]:0;
pOutput += 1;
pTimestamp += 1;
@@ -3290,7 +3312,7 @@ void spread_function_finalizer(SQLFunctionCtx *pCtx) {
return;
}
- *(double *)pCtx->pOutput = pCtx->param[3].dKey - pCtx->param[0].dKey;
+ SET_DOUBLE_VAL((double *)pCtx->pOutput, pCtx->param[3].dKey - pCtx->param[0].dKey);
} else {
assert(IS_NUMERIC_TYPE(pCtx->inputType) || (pCtx->inputType == TSDB_DATA_TYPE_TIMESTAMP));
@@ -3300,7 +3322,7 @@ void spread_function_finalizer(SQLFunctionCtx *pCtx) {
return;
}
- *(double *)pCtx->pOutput = pInfo->max - pInfo->min;
+ SET_DOUBLE_VAL((double *)pCtx->pOutput, pInfo->max - pInfo->min);
}
GET_RES_INFO(pCtx)->numOfRes = 1; // todo add test case
@@ -3628,9 +3650,9 @@ void twa_function_finalizer(SQLFunctionCtx *pCtx) {
assert(pInfo->win.ekey == pInfo->p.key && pInfo->hasResult == pResInfo->hasResult);
if (pInfo->win.ekey == pInfo->win.skey) {
- *(double *)pCtx->pOutput = pInfo->p.val;
+ SET_DOUBLE_VAL((double *)pCtx->pOutput, pInfo->p.val);
} else {
- *(double *)pCtx->pOutput = pInfo->dOutput / (pInfo->win.ekey - pInfo->win.skey);
+ SET_DOUBLE_VAL((double *)pCtx->pOutput , pInfo->dOutput / (pInfo->win.ekey - pInfo->win.skey));
}
GET_RES_INFO(pCtx)->numOfRes = 1;
@@ -3923,7 +3945,7 @@ static void rate_finalizer(SQLFunctionCtx *pCtx) {
return;
}
- *(double*) pCtx->pOutput = do_calc_rate(pRateInfo, (double) TSDB_TICK_PER_SECOND(pCtx->param[0].i64));
+ SET_DOUBLE_VAL((double*) pCtx->pOutput, do_calc_rate(pRateInfo, (double) TSDB_TICK_PER_SECOND(pCtx->param[0].i64)));
// cannot set the numOfIteratedElems again since it is set during previous iteration
pResInfo->numOfRes = 1;
@@ -3996,6 +4018,7 @@ static void mergeTableBlockDist(SResultRowCellInfo* pResInfo, const STableBlockD
pDist->numOfTables += pSrc->numOfTables;
pDist->numOfRowsInMemTable += pSrc->numOfRowsInMemTable;
+ pDist->numOfSmallBlocks += pSrc->numOfSmallBlocks;
pDist->numOfFiles += pSrc->numOfFiles;
pDist->totalSize += pSrc->totalSize;
pDist->totalRows += pSrc->totalRows;
@@ -4108,18 +4131,19 @@ void generateBlockDistResult(STableBlockDist *pTableBlockDist, char* result) {
uint64_t totalLen = pTableBlockDist->totalSize;
int32_t rowSize = pTableBlockDist->rowSize;
+ int32_t smallBlocks = pTableBlockDist->numOfSmallBlocks;
double compRatio = (totalRows>0) ? ((double)(totalLen)/(rowSize*totalRows)) : 1;
int sz = sprintf(result + VARSTR_HEADER_SIZE,
"summary: \n\t "
"5th=[%d], 10th=[%d], 20th=[%d], 30th=[%d], 40th=[%d], 50th=[%d]\n\t "
"60th=[%d], 70th=[%d], 80th=[%d], 90th=[%d], 95th=[%d], 99th=[%d]\n\t "
"Min=[%"PRId64"(Rows)] Max=[%"PRId64"(Rows)] Avg=[%"PRId64"(Rows)] Stddev=[%.2f] \n\t "
- "Rows=[%"PRIu64"], Blocks=[%"PRId64"], Size=[%.3f(Kb)] Comp=[%.2f]\n\t "
+ "Rows=[%"PRIu64"], Blocks=[%"PRId64"], SmallBlocks=[%d], Size=[%.3f(Kb)] Comp=[%.2f]\n\t "
"RowsInMem=[%d] \n\t",
percentiles[0], percentiles[1], percentiles[2], percentiles[3], percentiles[4], percentiles[5],
percentiles[6], percentiles[7], percentiles[8], percentiles[9], percentiles[10], percentiles[11],
min, max, avg, stdDev,
- totalRows, totalBlocks, totalLen/1024.0, compRatio,
+ totalRows, totalBlocks, smallBlocks, totalLen/1024.0, compRatio,
pTableBlockDist->numOfRowsInMemTable);
varDataSetLen(result, sz);
UNUSED(sz);
@@ -4132,6 +4156,11 @@ void blockinfo_func_finalizer(SQLFunctionCtx* pCtx) {
pDist->rowSize = (uint16_t)pCtx->param[0].i64;
generateBlockDistResult(pDist, pCtx->pOutput);
+ if (pDist->dataBlockInfos != NULL) {
+ taosArrayDestroy(pDist->dataBlockInfos);
+ pDist->dataBlockInfos = NULL;
+ }
+
// cannot set the numOfIteratedElems again since it is set during previous iteration
pResInfo->numOfRes = 1;
pResInfo->hasResult = DATA_SET_FLAG;
@@ -4157,8 +4186,8 @@ int32_t functionCompatList[] = {
4, -1, -1, 1, 1, 1, 1, 1, 1, -1,
// tag, colprj, tagprj, arithmetic, diff, first_dist, last_dist, stddev_dst, interp rate irate
1, 1, 1, 1, -1, 1, 1, 1, 5, 1, 1,
- // tid_tag, blk_info
- 6, 7
+ // tid_tag, derivative, blk_info
+ 6, 8, 7,
};
SAggFunctionInfo aAggs[] = {{
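The qAggMain.c hunks route every double store into pCtx->pOutput through SET_DOUBLE_VAL instead of a direct *(double *)p = v assignment. The macro's definition is not part of this diff; one plausible shape, assuming the intent is to tolerate possibly unaligned output buffers, is a memcpy-based store (sketch only):

#include <string.h>

/* Sketch of an assumed, memcpy-based double store. The real SET_DOUBLE_VAL
 * lives in TDengine headers outside this diff and may differ per platform. */
#define SET_DOUBLE_VAL_SKETCH(ptr, val)       \
  do {                                        \
    double _d = (double)(val);                \
    memcpy((ptr), &_d, sizeof(double));       \
  } while (0)

Funnelling the stores through one macro also keeps the new float/double summing paths (LIST_ADD_N_DOUBLE / LIST_ADD_N_DOUBLE_FLOAT) consistent with the rest of the aggregate writers.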
diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c
index 288a2064915df70990350523a694fadcf878f7e0..3f6df2ec07f89e9e91d0d9fa3b8de3ec14bb553b 100644
--- a/src/query/src/qExecutor.c
+++ b/src/query/src/qExecutor.c
@@ -30,6 +30,7 @@
#include "tcompare.h"
#include "tscompression.h"
#include "qScript.h"
+#include "tscLog.h"
#define IS_MASTER_SCAN(runtime) ((runtime)->scanFlag == MASTER_SCAN)
#define IS_REVERSE_SCAN(runtime) ((runtime)->scanFlag == REVERSE_SCAN)
@@ -97,12 +98,47 @@ static UNUSED_FUNC void* u_realloc(void* p, size_t __size) {
#define GET_NUM_OF_TABLEGROUP(q) taosArrayGetSize((q)->tableqinfoGroupInfo.pGroupList)
#define QUERY_IS_INTERVAL_QUERY(_q) ((_q)->interval.interval > 0)
+#define TSKEY_MAX_ADD(a,b) \
+do { \
+ if (a < 0) { a = a + b; break;} \
+ if (sizeof(a) == sizeof(int32_t)) { \
+ if((b) > 0 && ((b) >= INT32_MAX - (a))){\
+ a = INT32_MAX; \
+ } else { \
+ a = a + b; \
+ } \
+ } else { \
+ if((b) > 0 && ((b) >= INT64_MAX - (a))){\
+ a = INT64_MAX; \
+ } else { \
+ a = a + b; \
+ } \
+ } \
+} while(0)
+
+#define TSKEY_MIN_SUB(a,b) \
+do { \
+ if (a >= 0) { a = a + b; break;} \
+ if (sizeof(a) == sizeof(int32_t)){ \
+ if((b) < 0 && ((b) <= INT32_MIN - (a))){\
+ a = INT32_MIN; \
+ } else { \
+ a = a + b; \
+ } \
+ } else { \
+ if((b) < 0 && ((b) <= INT64_MIN-(a))) {\
+ a = INT64_MIN; \
+ } else { \
+ a = a + b; \
+ } \
+ } \
+} while (0)
+
uint64_t queryHandleId = 0;
int32_t getMaximumIdleDurationSec() {
return tsShellActivityTimer * 2;
}
-
int64_t genQueryId(void) {
int64_t uid = 0;
int64_t did = tsDnodeId;
@@ -149,12 +185,12 @@ static void getNextTimeWindow(SQueryAttr* pQueryAttr, STimeWindow* tw) {
int mon = (int)(tm.tm_year * 12 + tm.tm_mon + interval * factor);
tm.tm_year = mon / 12;
tm.tm_mon = mon % 12;
- tw->skey = convertTimePrecision(mktime(&tm) * 1000L, TSDB_TIME_PRECISION_MILLI, pQueryAttr->precision);
+ tw->skey = convertTimePrecision((int64_t)mktime(&tm) * 1000L, TSDB_TIME_PRECISION_MILLI, pQueryAttr->precision);
mon = (int)(mon + interval);
tm.tm_year = mon / 12;
tm.tm_mon = mon % 12;
- tw->ekey = convertTimePrecision(mktime(&tm) * 1000L, TSDB_TIME_PRECISION_MILLI, pQueryAttr->precision);
+ tw->ekey = convertTimePrecision((int64_t)mktime(&tm) * 1000L, TSDB_TIME_PRECISION_MILLI, pQueryAttr->precision);
tw->ekey -= 1;
}
@@ -752,7 +788,7 @@ static int32_t getNumOfRowsInTimeWindow(SQueryRuntimeEnv* pRuntimeEnv, SDataBloc
int32_t step = GET_FORWARD_DIRECTION_FACTOR(order);
if (QUERY_IS_ASC_QUERY(pQueryAttr)) {
- if (ekey < pDataBlockInfo->window.ekey) {
+ if (ekey < pDataBlockInfo->window.ekey && pPrimaryColumn) {
num = getForwardStepsInBlock(pDataBlockInfo->rows, searchFn, ekey, startPos, order, pPrimaryColumn);
if (updateLastKey) { // update the last key
item->lastKey = pPrimaryColumn[startPos + (num - 1)] + step;
@@ -764,7 +800,7 @@ static int32_t getNumOfRowsInTimeWindow(SQueryRuntimeEnv* pRuntimeEnv, SDataBloc
}
}
} else { // desc
- if (ekey > pDataBlockInfo->window.skey) {
+ if (ekey > pDataBlockInfo->window.skey && pPrimaryColumn) {
num = getForwardStepsInBlock(pDataBlockInfo->rows, searchFn, ekey, startPos, order, pPrimaryColumn);
if (updateLastKey) { // update the last key
item->lastKey = pPrimaryColumn[startPos - (num - 1)] + step;
@@ -1301,6 +1337,10 @@ static void doWindowBorderInterpolation(SOperatorInfo* pOperatorInfo, SSDataBloc
assert(pBlock != NULL);
int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQueryAttr->order.order);
+ if (pBlock->pDataBlock == NULL){
+ tscError("pBlock->pDataBlock == NULL");
+ return;
+ }
SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, 0);
TSKEY *tsCols = (TSKEY *)(pColInfo->pData);
@@ -2427,7 +2467,7 @@ static void updateDataCheckOrder(SQInfo *pQInfo, SQueryTableMsg* pQueryMsg, bool
if (pQueryAttr->pointInterpQuery && pQueryAttr->interval.interval == 0) {
if (!QUERY_IS_ASC_QUERY(pQueryAttr)) {
- qDebug(msg, pQInfo, "interp", pQueryAttr->order.order, TSDB_ORDER_ASC, pQueryAttr->window.skey, pQueryAttr->window.ekey, pQueryAttr->window.ekey, pQueryAttr->window.skey);
+ qDebug(msg, pQInfo->qId, "interp", pQueryAttr->order.order, TSDB_ORDER_ASC, pQueryAttr->window.skey, pQueryAttr->window.ekey, pQueryAttr->window.ekey, pQueryAttr->window.skey);
SWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey, TSKEY);
}
@@ -2438,7 +2478,7 @@ static void updateDataCheckOrder(SQInfo *pQInfo, SQueryTableMsg* pQueryMsg, bool
if (pQueryAttr->interval.interval == 0) {
if (onlyFirstQuery(pQueryAttr)) {
if (!QUERY_IS_ASC_QUERY(pQueryAttr)) {
- qDebug(msg, pQInfo, "only-first", pQueryAttr->order.order, TSDB_ORDER_ASC, pQueryAttr->window.skey,
+ qDebug(msg, pQInfo->qId, "only-first", pQueryAttr->order.order, TSDB_ORDER_ASC, pQueryAttr->window.skey,
pQueryAttr->window.ekey, pQueryAttr->window.ekey, pQueryAttr->window.skey);
SWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey, TSKEY);
@@ -2449,7 +2489,7 @@ static void updateDataCheckOrder(SQInfo *pQInfo, SQueryTableMsg* pQueryMsg, bool
pQueryAttr->needReverseScan = false;
} else if (onlyLastQuery(pQueryAttr) && notContainSessionOrStateWindow(pQueryAttr)) {
if (QUERY_IS_ASC_QUERY(pQueryAttr)) {
- qDebug(msg, pQInfo, "only-last", pQueryAttr->order.order, TSDB_ORDER_DESC, pQueryAttr->window.skey,
+ qDebug(msg, pQInfo->qId, "only-last", pQueryAttr->order.order, TSDB_ORDER_DESC, pQueryAttr->window.skey,
pQueryAttr->window.ekey, pQueryAttr->window.ekey, pQueryAttr->window.skey);
SWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey, TSKEY);
@@ -2464,7 +2504,7 @@ static void updateDataCheckOrder(SQInfo *pQInfo, SQueryTableMsg* pQueryMsg, bool
if (stableQuery) {
if (onlyFirstQuery(pQueryAttr)) {
if (!QUERY_IS_ASC_QUERY(pQueryAttr)) {
- qDebug(msg, pQInfo, "only-first stable", pQueryAttr->order.order, TSDB_ORDER_ASC,
+ qDebug(msg, pQInfo->qId, "only-first stable", pQueryAttr->order.order, TSDB_ORDER_ASC,
pQueryAttr->window.skey, pQueryAttr->window.ekey, pQueryAttr->window.ekey, pQueryAttr->window.skey);
SWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey, TSKEY);
@@ -2475,7 +2515,7 @@ static void updateDataCheckOrder(SQInfo *pQInfo, SQueryTableMsg* pQueryMsg, bool
pQueryAttr->needReverseScan = false;
} else if (onlyLastQuery(pQueryAttr)) {
if (QUERY_IS_ASC_QUERY(pQueryAttr)) {
- qDebug(msg, pQInfo, "only-last stable", pQueryAttr->order.order, TSDB_ORDER_DESC,
+ qDebug(msg, pQInfo->qId, "only-last stable", pQueryAttr->order.order, TSDB_ORDER_DESC,
pQueryAttr->window.skey, pQueryAttr->window.ekey, pQueryAttr->window.ekey, pQueryAttr->window.skey);
SWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey, TSKEY);
@@ -3124,7 +3164,9 @@ void setTagValue(SOperatorInfo* pOperatorInfo, void *pTable, SQLFunctionCtx* pCt
|| pLocalExprInfo->base.resType == TSDB_DATA_TYPE_TIMESTAMP) {
memcpy(pRuntimeEnv->tagVal + offset, &pCtx[idx].tag.i64, pLocalExprInfo->base.resBytes);
} else {
- memcpy(pRuntimeEnv->tagVal + offset, pCtx[idx].tag.pz, pCtx[idx].tag.nLen);
+ if (pCtx[idx].tag.pz != NULL) {
+ memcpy(pRuntimeEnv->tagVal + offset, pCtx[idx].tag.pz, pCtx[idx].tag.nLen);
+ }
}
offset += pLocalExprInfo->base.resBytes;
@@ -3563,6 +3605,7 @@ STableQueryInfo* createTmpTableQueryInfo(STimeWindow win) {
int32_t initialSize = 16;
int32_t code = initResultRowInfo(&pTableQueryInfo->resInfo, initialSize, TSDB_DATA_TYPE_INT);
if (code != TSDB_CODE_SUCCESS) {
+ tfree(pTableQueryInfo);
return NULL;
}
@@ -3934,8 +3977,8 @@ static void toSSDataBlock(SGroupResInfo *pGroupResInfo, SQueryRuntimeEnv* pRunti
// refactor : extract method
SColumnInfoData* pInfoData = taosArrayGet(pBlock->pDataBlock, 0);
-
- if (pInfoData->info.type == TSDB_DATA_TYPE_TIMESTAMP) {
+ // add the condition (pBlock->info.rows >= 1) just to keep the runtime check happy
+ if (pInfoData->info.type == TSDB_DATA_TYPE_TIMESTAMP && pBlock->info.rows >= 1) {
STimeWindow* w = &pBlock->info.window;
w->skey = *(int64_t*)pInfoData->pData;
w->ekey = *(int64_t*)(((char*)pInfoData->pData) + TSDB_KEYSIZE * (pBlock->info.rows - 1));
@@ -5273,7 +5316,15 @@ static SSDataBlock* doSTableAggregate(void* param, bool* newgroup) {
// the pDataBlock are always the same one, no need to call this again
setInputDataBlock(pOperator, pInfo->pCtx, pBlock, order);
- TSKEY key = QUERY_IS_ASC_QUERY(pQueryAttr)? pBlock->info.window.ekey + 1:pBlock->info.window.skey-1;
+ TSKEY key = 0;
+ if (QUERY_IS_ASC_QUERY(pQueryAttr)) {
+ key = pBlock->info.window.ekey;
+ TSKEY_MAX_ADD(key, 1);
+ } else {
+ key = pBlock->info.window.skey;
+ TSKEY_MIN_SUB(key, -1);
+ }
+
setExecutionContext(pRuntimeEnv, pInfo, pOperator->numOfOutput, pRuntimeEnv->current->groupIndex, key);
doAggregateImpl(pOperator, pQueryAttr->window.skey, pInfo->pCtx, pBlock);
}
@@ -5510,6 +5561,8 @@ static SSDataBlock* doIntervalAgg(void* param, bool* newgroup) {
break;
}
+ setTagValue(pOperator, pRuntimeEnv->current->pTable, pIntervalInfo->pCtx, pOperator->numOfOutput);
+
// the pDataBlock are always the same one, no need to call this again
setInputDataBlock(pOperator, pIntervalInfo->pCtx, pBlock, pQueryAttr->order.order);
hashIntervalAgg(pOperator, &pIntervalInfo->resultRowInfo, pBlock, 0);
@@ -6479,7 +6532,7 @@ static SSDataBlock* doTagScan(void* param, bool* newgroup) {
pOperator->status = OP_EXEC_DONE;
qDebug("QInfo:0x%"PRIx64" create count(tbname) query, res:%d rows:1", GET_QID(pRuntimeEnv), count);
} else { // return only the tags|table name etc.
- SExprInfo* pExprInfo = pOperator->pExpr; // todo use the column list instead of exprinfo
+ SExprInfo* pExprInfo = &pOperator->pExpr[0]; // todo use the column list instead of exprinfo
count = 0;
while(pInfo->curPos < pInfo->totalTables && count < maxNumOfTables) {
@@ -6554,9 +6607,11 @@ static SSDataBlock* hashDistinct(void* param, bool* newgroup) {
return NULL;
}
+
SDistinctOperatorInfo* pInfo = pOperator->info;
SSDataBlock* pRes = pInfo->pRes;
+
pRes->info.rows = 0;
SSDataBlock* pBlock = NULL;
while(1) {
@@ -6565,13 +6620,25 @@ static SSDataBlock* hashDistinct(void* param, bool* newgroup) {
publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC);
if (pBlock == NULL) {
+ setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
+ pOperator->status = OP_EXEC_DONE;
+ break;
+ }
+ if (pInfo->colIndex == -1) {
+ for (int i = 0; i < taosArrayGetSize(pBlock->pDataBlock); i++) {
+ SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, i);
+ if (pColDataInfo->info.colId == pOperator->pExpr[0].base.resColId) {
+ pInfo->colIndex = i;
+ break;
+ }
+ }
+ }
+ if (pInfo->colIndex == -1) {
setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
pOperator->status = OP_EXEC_DONE;
return NULL;
}
-
- assert(pBlock->info.numOfCols == 1);
- SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, 0);
+ SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, pInfo->colIndex);
int16_t bytes = pColInfoData->info.bytes;
int16_t type = pColInfoData->info.type;
@@ -6594,10 +6661,19 @@ static SSDataBlock* hashDistinct(void* param, bool* newgroup) {
if (isNull(val, type)) {
continue;
}
- int dummy;
- void* res = taosHashGet(pInfo->pSet, val, bytes);
+
+ size_t keyLen = 0;
+ if (IS_VAR_DATA_TYPE(pOperator->pExpr->base.colType)) {
+ tstr* var = (tstr*)(val);
+ keyLen = varDataLen(var);
+ } else {
+ keyLen = bytes;
+ }
+
+ int dummy;
+ void* res = taosHashGet(pInfo->pSet, val, keyLen);
if (res == NULL) {
- taosHashPut(pInfo->pSet, val, bytes, &dummy, sizeof(dummy));
+ taosHashPut(pInfo->pSet, val, keyLen, &dummy, sizeof(dummy));
char* start = pResultColInfoData->pData + bytes * pInfo->pRes->info.rows;
memcpy(start, val, bytes);
pRes->info.rows += 1;
@@ -6614,7 +6690,8 @@ static SSDataBlock* hashDistinct(void* param, bool* newgroup) {
SOperatorInfo* createDistinctOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) {
SDistinctOperatorInfo* pInfo = calloc(1, sizeof(SDistinctOperatorInfo));
-
+ pInfo->colIndex = -1;
+ pInfo->threshold = 10000000; // distinct result threshold
pInfo->outputCapacity = 4096;
pInfo->pSet = taosHashInit(64, taosGetDefaultHashFunction(pExpr->base.colType), false, HASH_NO_LOCK);
pInfo->pRes = createOutputBuf(pExpr, numOfOutput, (int32_t) pInfo->outputCapacity);
@@ -6624,10 +6701,12 @@ SOperatorInfo* createDistinctOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperat
pOperator->blockingOptr = false;
pOperator->status = OP_IN_EXECUTING;
pOperator->operatorType = OP_Distinct;
+ pOperator->pExpr = pExpr;
pOperator->numOfOutput = numOfOutput;
pOperator->info = pInfo;
pOperator->pRuntimeEnv = pRuntimeEnv;
pOperator->exec = hashDistinct;
+ pOperator->pExpr = pExpr;
pOperator->cleanup = destroyDistinctOperatorInfo;
appendUpstream(pOperator, upstream);
@@ -7231,9 +7310,7 @@ void destroyUdfInfo(SUdfInfo* pUdfInfo) {
tfree(pUdfInfo);
}
-static char* getUdfFuncName(char* name, int type) {
- char* funcname = calloc(1, TSDB_FUNCTIONS_NAME_MAX_LENGTH + 10);
-
+static char* getUdfFuncName(char* funcname, char* name, int type) {
switch (type) {
case TSDB_UDF_FUNC_NORMAL:
strcpy(funcname, name);
@@ -7304,19 +7381,20 @@ int32_t initUdfInfo(SUdfInfo* pUdfInfo) {
return TSDB_CODE_QRY_SYS_ERROR;
}
- pUdfInfo->funcs[TSDB_UDF_FUNC_NORMAL] = taosLoadSym(pUdfInfo->handle, getUdfFuncName(pUdfInfo->name, TSDB_UDF_FUNC_NORMAL));
+ char funcname[TSDB_FUNCTIONS_NAME_MAX_LENGTH + 10] = {0};
+ pUdfInfo->funcs[TSDB_UDF_FUNC_NORMAL] = taosLoadSym(pUdfInfo->handle, getUdfFuncName(funcname, pUdfInfo->name, TSDB_UDF_FUNC_NORMAL));
if (NULL == pUdfInfo->funcs[TSDB_UDF_FUNC_NORMAL]) {
return TSDB_CODE_QRY_SYS_ERROR;
}
- pUdfInfo->funcs[TSDB_UDF_FUNC_INIT] = taosLoadSym(pUdfInfo->handle, getUdfFuncName(pUdfInfo->name, TSDB_UDF_FUNC_INIT));
+ pUdfInfo->funcs[TSDB_UDF_FUNC_INIT] = taosLoadSym(pUdfInfo->handle, getUdfFuncName(funcname, pUdfInfo->name, TSDB_UDF_FUNC_INIT));
if (pUdfInfo->funcType == TSDB_UDF_TYPE_AGGREGATE) {
- pUdfInfo->funcs[TSDB_UDF_FUNC_FINALIZE] = taosLoadSym(pUdfInfo->handle, getUdfFuncName(pUdfInfo->name, TSDB_UDF_FUNC_FINALIZE));
- pUdfInfo->funcs[TSDB_UDF_FUNC_MERGE] = taosLoadSym(pUdfInfo->handle, getUdfFuncName(pUdfInfo->name, TSDB_UDF_FUNC_MERGE));
+ pUdfInfo->funcs[TSDB_UDF_FUNC_FINALIZE] = taosLoadSym(pUdfInfo->handle, getUdfFuncName(funcname, pUdfInfo->name, TSDB_UDF_FUNC_FINALIZE));
+ pUdfInfo->funcs[TSDB_UDF_FUNC_MERGE] = taosLoadSym(pUdfInfo->handle, getUdfFuncName(funcname, pUdfInfo->name, TSDB_UDF_FUNC_MERGE));
}
- pUdfInfo->funcs[TSDB_UDF_FUNC_DESTROY] = taosLoadSym(pUdfInfo->handle, getUdfFuncName(pUdfInfo->name, TSDB_UDF_FUNC_DESTROY));
+ pUdfInfo->funcs[TSDB_UDF_FUNC_DESTROY] = taosLoadSym(pUdfInfo->handle, getUdfFuncName(funcname, pUdfInfo->name, TSDB_UDF_FUNC_DESTROY));
if (pUdfInfo->funcs[TSDB_UDF_FUNC_INIT]) {
return (*(udfInitFunc)pUdfInfo->funcs[TSDB_UDF_FUNC_INIT])(&pUdfInfo->init);
@@ -7388,10 +7466,12 @@ int32_t createQueryFunc(SQueriedTableInfo* pTableInfo, int32_t numOfOutput, SExp
int32_t j = getColumnIndexInSource(pTableInfo, &pExprs[i].base, pTagCols);
if (TSDB_COL_IS_TAG(pExprs[i].base.colInfo.flag)) {
if (j < TSDB_TBNAME_COLUMN_INDEX || j >= pTableInfo->numOfTags) {
+ tfree(pExprs);
return TSDB_CODE_QRY_INVALID_MSG;
}
} else {
if (j < PRIMARYKEY_TIMESTAMP_COL_INDEX || j >= pTableInfo->numOfCols) {
+ tfree(pExprs);
return TSDB_CODE_QRY_INVALID_MSG;
}
}
@@ -7411,6 +7491,7 @@ int32_t createQueryFunc(SQueriedTableInfo* pTableInfo, int32_t numOfOutput, SExp
int32_t ret = cloneExprFilterInfo(&pExprs[i].base.flist.filterInfo, pExprMsg[i]->flist.filterInfo,
pExprMsg[i]->flist.numOfFilters);
if (ret) {
+ tfree(pExprs);
return ret;
}
}
@@ -7529,7 +7610,7 @@ SGroupbyExpr *createGroupbyExprFromMsg(SQueryTableMsg *pQueryMsg, SColIndex *pCo
int32_t doCreateFilterInfo(SColumnInfo* pCols, int32_t numOfCols, int32_t numOfFilterCols, SSingleColumnFilterInfo** pFilterInfo, uint64_t qId) {
*pFilterInfo = calloc(1, sizeof(SSingleColumnFilterInfo) * numOfFilterCols);
- if (pFilterInfo == NULL) {
+ if (*pFilterInfo == NULL) {
return TSDB_CODE_QRY_OUT_OF_MEMORY;
}
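The TSKEY_MAX_ADD / TSKEY_MIN_SUB macros added near the top of qExecutor.c saturate instead of wrapping when a block window's boundary key sits at the edge of the key range (see the doSTableAggregate hunk, where window.ekey + 1 could previously overflow). A function-style restatement of the 64-bit branch of TSKEY_MAX_ADD (illustrative only, not part of the patch):

#include <stdint.h>

/* Sketch of the saturating add the macro performs for 64-bit keys:
 * clamp to INT64_MAX rather than letting key + delta wrap around. */
static int64_t tskey_saturating_add(int64_t key, int64_t delta) {
  if (key < 0) return key + delta;                     /* same shortcut as the macro */
  if (delta > 0 && delta >= INT64_MAX - key) return INT64_MAX;
  return key + delta;
}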
diff --git a/src/query/src/qExtbuffer.c b/src/query/src/qExtbuffer.c
index 1fe2819ea2347f8e092a3955f6d4c3269e148294..cc47cc824bcf59f0839bc5a439d4d15b89e030ea 100644
--- a/src/query/src/qExtbuffer.c
+++ b/src/query/src/qExtbuffer.c
@@ -391,11 +391,18 @@ int32_t columnValueAscendingComparator(char *f1, char *f2, int32_t type, int32_t
};
case TSDB_DATA_TYPE_NCHAR: { // todo handle the var string compare
- int32_t ret = tasoUcs4Compare(f1, f2, bytes);
- if (ret == 0) {
- return 0;
+ int32_t len1 = varDataLen(f1);
+ int32_t len2 = varDataLen(f2);
+
+ if (len1 != len2) {
+ return len1 > len2 ? 1 : -1;
+ } else {
+ int32_t ret = tasoUcs4Compare(varDataVal(f1), varDataVal(f2), len1);
+ if (ret == 0) {
+ return 0;
+ }
+ return (ret < 0) ? -1 : 1;
}
- return (ret < 0) ? -1 : 1;
};
case TSDB_DATA_TYPE_UTINYINT: DEFAULT_COMP(GET_UINT8_VAL(f1), GET_UINT8_VAL(f2));
case TSDB_DATA_TYPE_USMALLINT: DEFAULT_COMP(GET_UINT16_VAL(f1), GET_UINT16_VAL(f2));
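The NCHAR comparator fix in qExtbuffer.c orders var-data values by payload length first and only passes varDataVal() to tasoUcs4Compare, rather than comparing from the length header onward. A generic sketch of that comparison order, with memcmp standing in for the UCS-4 comparator (illustrative only; the helper is hypothetical):

#include <stddef.h>
#include <string.h>

/* Order length-prefixed strings by payload length first, then by payload
 * content; the length header itself is never fed to the comparator. */
static int vardata_compare(const char *a, size_t alen, const char *b, size_t blen) {
  if (alen != blen) return (alen > blen) ? 1 : -1;
  int ret = memcmp(a, b, alen);       /* stand-in for tasoUcs4Compare */
  return (ret < 0) ? -1 : (ret > 0) ? 1 : 0;
}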
diff --git a/src/query/src/qPlan.c b/src/query/src/qPlan.c
index e724b0418c5fe5e9a34459e09cf37c535d3236f2..e5b9b68536a5de3e1cc8f11bff2d88ab77491f44 100644
--- a/src/query/src/qPlan.c
+++ b/src/query/src/qPlan.c
@@ -40,7 +40,7 @@ static SQueryNode* createQueryNode(int32_t type, const char* name, SQueryNode**
pNode->info.type = type;
pNode->info.name = strdup(name);
- if (pTableInfo->id.uid != 0) { // it is a true table
+ if (pTableInfo->id.uid != 0 && pTableInfo->tableName) { // it is a true table
pNode->tableInfo.id = pTableInfo->id;
pNode->tableInfo.tableName = strdup(pTableInfo->tableName);
}
@@ -104,7 +104,7 @@ static SQueryNode* doAddTableColumnNode(SQueryInfo* pQueryInfo, STableMetaInfo*
int32_t num = (int32_t) taosArrayGetSize(pExprs);
SQueryNode* pNode = createQueryNode(QNODE_TAGSCAN, "TableTagScan", NULL, 0, pExprs->pData, num, info, NULL);
- if (pQueryInfo->distinctTag) {
+ if (pQueryInfo->distinct) {
pNode = createQueryNode(QNODE_DISTINCT, "Distinct", &pNode, 1, pExprs->pData, num, info, NULL);
}
@@ -222,6 +222,7 @@ SArray* createQueryPlanImpl(SQueryInfo* pQueryInfo) {
if (pQueryInfo->numOfTables > 1) { // it is a join query
// 1. separate the select clause according to table
+ taosArrayDestroy(upstream);
upstream = taosArrayInit(5, POINTER_BYTES);
for(int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
@@ -231,6 +232,7 @@ SArray* createQueryPlanImpl(SQueryInfo* pQueryInfo) {
SArray* exprList = taosArrayInit(4, POINTER_BYTES);
if (tscExprCopy(exprList, pQueryInfo->exprList, uid, true) != 0) {
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ tscExprDestroy(exprList);
exit(-1);
}
@@ -245,6 +247,8 @@ SArray* createQueryPlanImpl(SQueryInfo* pQueryInfo) {
// 4. add the projection query node
SQueryNode* pNode = doAddTableColumnNode(pQueryInfo, pTableMetaInfo, &info, exprList, tableColumnList);
+ tscColumnListDestroy(tableColumnList);
+ tscExprDestroy(exprList);
taosArrayPush(upstream, &pNode);
}
@@ -551,9 +555,11 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) {
int32_t op = 0;
if (onlyQueryTags(pQueryAttr)) { // do nothing for tags query
- op = OP_TagScan;
- taosArrayPush(plan, &op);
- if (pQueryAttr->distinctTag) {
+ if (onlyQueryTags(pQueryAttr)) {
+ op = OP_TagScan;
+ taosArrayPush(plan, &op);
+ }
+ if (pQueryAttr->distinct) {
op = OP_Distinct;
taosArrayPush(plan, &op);
}
@@ -630,8 +636,13 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) {
} else {
op = OP_Project;
taosArrayPush(plan, &op);
+ if (pQueryAttr->distinct) {
+ op = OP_Distinct;
+ taosArrayPush(plan, &op);
+ }
}
}
+
if (pQueryAttr->limit.limit > 0 || pQueryAttr->limit.offset > 0) {
op = OP_Limit;
@@ -651,7 +662,7 @@ SArray* createGlobalMergePlan(SQueryAttr* pQueryAttr) {
int32_t op = OP_MultiwayMergeSort;
taosArrayPush(plan, &op);
- if (pQueryAttr->distinctTag) {
+ if (pQueryAttr->distinct) {
op = OP_Distinct;
taosArrayPush(plan, &op);
}
diff --git a/src/query/src/qResultbuf.c b/src/query/src/qResultbuf.c
index 05ecf2e9b1163e1f858bb6a1b08e0df7eaabcab0..63eba51d6b5f9832d162336574c59aa964634bf3 100644
--- a/src/query/src/qResultbuf.c
+++ b/src/query/src/qResultbuf.c
@@ -78,8 +78,9 @@ static char* doDecompressData(void* data, int32_t srcSize, int32_t *dst, SDiskba
}
*dst = tsDecompressString(data, srcSize, 1, pResultBuf->assistBuf, pResultBuf->pageSize, ONE_STAGE_COMP, NULL, 0);
-
- memcpy(data, pResultBuf->assistBuf, *dst);
+ if (*dst > 0) {
+ memcpy(data, pResultBuf->assistBuf, *dst);
+ }
return data;
}
diff --git a/src/query/src/qScript.c b/src/query/src/qScript.c
index 261164a84c0b347adc36e3e2abaf2113d5564436..c43b0b3435b2073d4711bbb8a0ec0d9e347b0d13 100644
--- a/src/query/src/qScript.c
+++ b/src/query/src/qScript.c
@@ -342,6 +342,7 @@ int32_t scriptEnvPoolInit() {
env->lua_state = createLuaEnv();
tdListAppend(pool->scriptEnvs, (void *)(&env));
}
+
pool->mSize = size;
pool->cSize = size;
return 0;
@@ -377,9 +378,11 @@ ScriptEnv* getScriptEnvFromPool() {
return NULL;
}
SListNode *pNode = tdListPopHead(pool->scriptEnvs);
- tdListNodeGetData(pool->scriptEnvs, pNode, (void *)(&pEnv));
- listNodeFree(pNode);
-
+ if (pNode){
+ tdListNodeGetData(pool->scriptEnvs, pNode, (void *)(&pEnv));
+ listNodeFree(pNode);
+ }
+
pool->cSize--;
pthread_mutex_unlock(&pool->mutex);
return pEnv;
diff --git a/src/query/src/qSqlParser.c b/src/query/src/qSqlParser.c
index 874ec7b692489373e297dea04ed69915e17024a8..08c55d5d7e5edf71a059635fe61445119b2f19c2 100644
--- a/src/query/src/qSqlParser.c
+++ b/src/query/src/qSqlParser.c
@@ -142,14 +142,17 @@ tSqlExpr *tSqlExprCreateIdValue(SStrToken *pToken, int32_t optrType) {
}
if (optrType == TK_NULL) {
- pToken->type = TSDB_DATA_TYPE_NULL;
- tVariantCreate(&pSqlExpr->value, pToken);
+ if (pToken){
+ pToken->type = TSDB_DATA_TYPE_NULL;
+ tVariantCreate(&pSqlExpr->value, pToken);
+ }
pSqlExpr->tokenId = optrType;
pSqlExpr->type = SQL_NODE_VALUE;
} else if (optrType == TK_INTEGER || optrType == TK_STRING || optrType == TK_FLOAT || optrType == TK_BOOL) {
- toTSDBType(pToken->type);
-
- tVariantCreate(&pSqlExpr->value, pToken);
+ if (pToken) {
+ toTSDBType(pToken->type);
+ tVariantCreate(&pSqlExpr->value, pToken);
+ }
pSqlExpr->tokenId = optrType;
pSqlExpr->type = SQL_NODE_VALUE;
} else if (optrType == TK_NOW) {
@@ -162,9 +165,11 @@ tSqlExpr *tSqlExprCreateIdValue(SStrToken *pToken, int32_t optrType) {
} else if (optrType == TK_VARIABLE) {
// use nanosecond by default
// TODO set value after getting database precision
- int32_t ret = parseAbsoluteDuration(pToken->z, pToken->n, &pSqlExpr->value.i64, TSDB_TIME_PRECISION_NANO);
- if (ret != TSDB_CODE_SUCCESS) {
- terrno = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
+ if (pToken) {
+ int32_t ret = parseAbsoluteDuration(pToken->z, pToken->n, &pSqlExpr->value.i64, TSDB_TIME_PRECISION_NANO);
+ if (ret != TSDB_CODE_SUCCESS) {
+ terrno = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
+ }
}
pSqlExpr->flags |= 1 << EXPR_FLAG_NS_TIMESTAMP;
@@ -340,8 +345,9 @@ static FORCE_INLINE int32_t tStrTokenCompare(SStrToken* left, SStrToken* right)
return (left->type == right->type && left->n == right->n && strncasecmp(left->z, right->z, left->n) == 0) ? 0 : 1;
}
+// this function is temporarily unused
int32_t tSqlExprCompare(tSqlExpr *left, tSqlExpr *right) {
- if ((left == NULL && right) || (left && right == NULL)) {
+ if ((left == NULL && right) || (left && right == NULL) || (left == NULL && right == NULL)) {
return 1;
}
@@ -712,9 +718,8 @@ void tSetColumnType(TAOS_FIELD *pField, SStrToken *type) {
} else {
int32_t bytes = -(int32_t)(type->type);
if (bytes > (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) {
- // we have to postpone reporting the error because it cannot be done here
- // as pField->bytes is int16_t, use 'TSDB_MAX_NCHAR_LEN + 1' to avoid overflow
- bytes = TSDB_MAX_NCHAR_LEN + 1;
+ // overflowed. set bytes to -1 so that error can be reported
+ bytes = -1;
} else {
bytes = bytes * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE;
}
@@ -727,8 +732,8 @@ void tSetColumnType(TAOS_FIELD *pField, SStrToken *type) {
} else {
int32_t bytes = -(int32_t)(type->type);
if (bytes > TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) {
- // refer comment for NCHAR above
- bytes = TSDB_MAX_BINARY_LEN + 1;
+ // overflowed. set bytes to -1 so that error can be reported
+ bytes = -1;
} else {
bytes += VARSTR_HEADER_SIZE;
}
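The tSetColumnType change replaces the 'TSDB_MAX_NCHAR_LEN + 1' / 'TSDB_MAX_BINARY_LEN + 1' sentinel for an oversized NCHAR/BINARY declaration with -1, so the out-of-range length is reported as an error instead of being silently carried forward. A sketch of that convention (illustrative only; names and parameters are hypothetical):

#include <stdint.h>

/* checked_nchar_bytes mirrors the "-1 means out of range, let the caller
 * report the error" convention adopted above. */
static int32_t checked_nchar_bytes(int32_t declaredLen, int32_t maxLen,
                                   int32_t charSize, int32_t headerSize) {
  if (declaredLen > (maxLen - headerSize) / charSize) {
    return -1;                        /* overflowed: reject during validation */
  }
  return declaredLen * charSize + headerSize;
}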
diff --git a/src/query/src/qTableMeta.c b/src/query/src/qTableMeta.c
index d25d6b7004b1dcf52fca97e3d27465e92f8de4f2..f687b8aa1ffc530d0c4a71c553809dd3bfb83932 100644
--- a/src/query/src/qTableMeta.c
+++ b/src/query/src/qTableMeta.c
@@ -72,7 +72,7 @@ SSchema* tscGetColumnSchemaById(STableMeta* pTableMeta, int16_t colId) {
}
STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg) {
- assert(pTableMetaMsg != NULL && pTableMetaMsg->numOfColumns >= 2 && pTableMetaMsg->numOfTags >= 0);
+ assert(pTableMetaMsg != NULL && pTableMetaMsg->numOfColumns >= 2);
int32_t schemaSize = (pTableMetaMsg->numOfColumns + pTableMetaMsg->numOfTags) * sizeof(SSchema);
STableMeta* pTableMeta = calloc(1, sizeof(STableMeta) + schemaSize);
diff --git a/src/query/src/qUtil.c b/src/query/src/qUtil.c
index 04a7079128ac035542611f06559409a81bc43cf1..a3d2e424d23e5ee566bc54117d3fc421d5b42d78 100644
--- a/src/query/src/qUtil.c
+++ b/src/query/src/qUtil.c
@@ -583,6 +583,7 @@ void blockDistInfoToBinary(STableBlockDist* pDist, struct SBufferWriter* bw) {
tbufWriteInt32(bw, pDist->maxRows);
tbufWriteInt32(bw, pDist->minRows);
tbufWriteUint32(bw, pDist->numOfRowsInMemTable);
+ tbufWriteUint32(bw, pDist->numOfSmallBlocks);
tbufWriteUint64(bw, taosArrayGetSize(pDist->dataBlockInfos));
// compress the binary string
@@ -621,6 +622,7 @@ void blockDistInfoFromBinary(const char* data, int32_t len, STableBlockDist* pDi
pDist->maxRows = tbufReadInt32(&br);
pDist->minRows = tbufReadInt32(&br);
pDist->numOfRowsInMemTable = tbufReadUint32(&br);
+ pDist->numOfSmallBlocks = tbufReadUint32(&br);
int64_t numSteps = tbufReadUint64(&br);
bool comp = tbufReadUint8(&br);
diff --git a/src/query/src/queryMain.c b/src/query/src/queryMain.c
index 0d140d5ffbca980f72576a39283b4765ed14da68..7d30f7c66812c0feb9c7ac37db9d7330fc2f37fb 100644
--- a/src/query/src/queryMain.c
+++ b/src/query/src/queryMain.c
@@ -261,7 +261,7 @@ int32_t qRetrieveQueryResultInfo(qinfo_t qinfo, bool* buildRes, void* pRspContex
SQInfo *pQInfo = (SQInfo *)qinfo;
if (pQInfo == NULL || !isValidQInfo(pQInfo)) {
- qError("QInfo:0x%"PRIx64" invalid qhandle", pQInfo->qId);
+ qError("QInfo invalid qhandle");
return TSDB_CODE_QRY_INVALID_QHANDLE;
}
diff --git a/src/rpc/src/rpcTcp.c b/src/rpc/src/rpcTcp.c
index e9feeef9d339a5b1a96e41fd35fb6c62e13ed94b..0449ecac8b228662455930b8caf7ff2b5a2da7b2 100644
--- a/src/rpc/src/rpcTcp.c
+++ b/src/rpc/src/rpcTcp.c
@@ -529,10 +529,9 @@ static void *taosProcessTcpData(void *param) {
SFdObj *pFdObj;
struct epoll_event events[maxEvents];
SRecvInfo recvInfo;
- char name[16];
- memset(name, 0, sizeof(name));
- snprintf(name, 16, "%s-tcpData", pThreadObj->label);
+ char name[16] = {0};
+ snprintf(name, tListLen(name), "%s-tcp", pThreadObj->label);
setThreadName(name);
while (1) {
diff --git a/src/rpc/test/rclient.c b/src/rpc/test/rclient.c
index de30114bd1c7fa9687a6d75bca3d7158137e29e4..2f4433f1bb32e965de66a40d7d6ae36c6804a06c 100644
--- a/src/rpc/test/rclient.c
+++ b/src/rpc/test/rclient.c
@@ -48,8 +48,6 @@ static void *sendRequest(void *param) {
SInfo *pInfo = (SInfo *)param;
SRpcMsg rpcMsg = {0};
- setThreadName("sendCliReq");
-
tDebug("thread:%d, start to send request", pInfo->index);
while ( pInfo->numOfReqs == 0 || pInfo->num < pInfo->numOfReqs) {
diff --git a/src/rpc/test/rsclient.c b/src/rpc/test/rsclient.c
index 3e94a56efb3494ac5fe1942245abd0bad8815ee7..65170d4abb2745a469dfda3e4146c2ea85405b33 100644
--- a/src/rpc/test/rsclient.c
+++ b/src/rpc/test/rsclient.c
@@ -40,9 +40,7 @@ static int terror = 0;
static void *sendRequest(void *param) {
SInfo *pInfo = (SInfo *)param;
SRpcMsg rpcMsg, rspMsg;
-
- setThreadName("sendSrvReq");
-
+
tDebug("thread:%d, start to send request", pInfo->index);
while ( pInfo->numOfReqs == 0 || pInfo->num < pInfo->numOfReqs) {
diff --git a/src/sync/inc/syncTcp.h b/src/sync/inc/syncTcp.h
index b322c3440c4eeb5b53b1d55a2446030b4f892e0b..e2e5234d3996608a6ed9ce952fcb08aabca7e8b5 100644
--- a/src/sync/inc/syncTcp.h
+++ b/src/sync/inc/syncTcp.h
@@ -25,7 +25,7 @@ typedef struct {
uint32_t serverIp;
int16_t port;
int32_t bufferSize;
- void (*processBrokenLink)(int64_t handleId);
+ void (*processBrokenLink)(int64_t handleId, int32_t closedByApp);
int32_t (*processIncomingMsg)(int64_t handleId, void *buffer);
void (*processIncomingConn)(SOCKET fd, uint32_t ip);
} SPoolInfo;
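With the widened processBrokenLink signature, a handler can now tell links the application closed on purpose from links that actually broke; syncMain.c below uses this to leave tsArbOnline untouched when closedByApp is set. A minimal callback matching the new signature (illustrative only; the function name is hypothetical):

#include <inttypes.h>
#include <stdio.h>

/* demoProcessBrokenLink only shows how a handler can branch on the new
 * closedByApp argument. */
static void demoProcessBrokenLink(int64_t handleId, int32_t closedByApp) {
  if (closedByApp) {
    printf("link %" PRId64 " closed by the application, no failover needed\n", handleId);
  } else {
    printf("link %" PRId64 " broken unexpectedly, schedule reconnect\n", handleId);
  }
}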
diff --git a/src/sync/src/syncArbitrator.c b/src/sync/src/syncArbitrator.c
index fdbef4c9f827d77bdc1ce1874dc2b368a7467ca1..08ebe7ae3e14dade398c793e3489508e21b58f20 100644
--- a/src/sync/src/syncArbitrator.c
+++ b/src/sync/src/syncArbitrator.c
@@ -30,7 +30,7 @@
extern void syncProcessTestMsg(SSyncMsg *pMsg, SOCKET connFd);
static void arbSignalHandler(int32_t signum, void *sigInfo, void *context);
static void arbProcessIncommingConnection(SOCKET connFd, uint32_t sourceIp);
-static void arbProcessBrokenLink(int64_t rid);
+static void arbProcessBrokenLink(int64_t rid, int32_t closedByApp);
static int32_t arbProcessPeerMsg(int64_t rid, void *buffer);
static tsem_t tsArbSem;
static void * tsArbTcpPool;
@@ -147,10 +147,10 @@ static void arbProcessIncommingConnection(SOCKET connFd, uint32_t sourceIp) {
return;
}
-static void arbProcessBrokenLink(int64_t rid) {
+static void arbProcessBrokenLink(int64_t rid, int32_t closedByApp) {
SNodeConn *pNode = (SNodeConn *)rid;
- sDebug("%s, TCP link is broken since %s, close connection", pNode->id, strerror(errno));
+ sDebug("%s, TCP link is broken since %s, closedByApp:%d", pNode->id, strerror(errno), closedByApp);
tfree(pNode);
}
diff --git a/src/sync/src/syncMain.c b/src/sync/src/syncMain.c
index e9b24315d095c3a379f9702079f7c93be5149282..68bafb09ca82a7a55b8eb3cd94c24138f5ef3a6c 100644
--- a/src/sync/src/syncMain.c
+++ b/src/sync/src/syncMain.c
@@ -43,7 +43,7 @@ static void syncProcessSyncRequest(char *pMsg, SSyncPeer *pPeer);
static void syncRecoverFromMaster(SSyncPeer *pPeer);
static void syncCheckPeerConnection(void *param, void *tmrId);
static int32_t syncSendPeersStatusMsgToPeer(SSyncPeer *pPeer, char ack, int8_t type, uint16_t tranId);
-static void syncProcessBrokenLink(int64_t rid);
+static void syncProcessBrokenLink(int64_t rid, int32_t closedByApp);
static int32_t syncProcessPeerMsg(int64_t rid, void *buffer);
static void syncProcessIncommingConnection(SOCKET connFd, uint32_t sourceIp);
static void syncRemovePeer(SSyncPeer *pPeer);
@@ -1308,7 +1308,7 @@ static void syncProcessIncommingConnection(SOCKET connFd, uint32_t sourceIp) {
pthread_mutex_unlock(&pNode->mutex);
}
-static void syncProcessBrokenLink(int64_t rid) {
+static void syncProcessBrokenLink(int64_t rid, int32_t closedByApp) {
SSyncPeer *pPeer = syncAcquirePeer(rid);
if (pPeer == NULL) return;
@@ -1316,9 +1316,10 @@ static void syncProcessBrokenLink(int64_t rid) {
pthread_mutex_lock(&pNode->mutex);
- sDebug("%s, TCP link is broken since %s, pfd:%d sfd:%d", pPeer->id, strerror(errno), pPeer->peerFd, pPeer->syncFd);
+ sDebug("%s, TCP link is broken since %s, pfd:%d sfd:%d closedByApp:%d",
+ pPeer->id, strerror(errno), pPeer->peerFd, pPeer->syncFd, closedByApp);
pPeer->peerFd = -1;
- if (pPeer->isArb) {
+ if (!closedByApp && pPeer->isArb) {
tsArbOnline = 0;
}
diff --git a/src/sync/src/syncRetrieve.c b/src/sync/src/syncRetrieve.c
index 89fdda0686ffc6d7d5b372def92e48c6cf06c2ab..c86ab8549974712658ad3d381c4141427c000762 100644
--- a/src/sync/src/syncRetrieve.c
+++ b/src/sync/src/syncRetrieve.c
@@ -415,7 +415,6 @@ static int32_t syncRetrieveDataStepByStep(SSyncPeer *pPeer) {
}
void *syncRetrieveData(void *param) {
- setThreadName("syncRetrievData");
int64_t rid = (int64_t)param;
SSyncPeer *pPeer = syncAcquirePeer(rid);
if (pPeer == NULL) {
diff --git a/src/sync/src/syncTcp.c b/src/sync/src/syncTcp.c
index 698245f9e408281e2a7c41da2d6228a1ec12217d..ccb0a67e5ca99441a1c3026fee498c36795518c5 100644
--- a/src/sync/src/syncTcp.c
+++ b/src/sync/src/syncTcp.c
@@ -177,7 +177,7 @@ static void taosProcessBrokenLink(SConnObj *pConn) {
SPoolInfo * pInfo = &pPool->info;
if (pConn->closedByApp == 0) shutdown(pConn->fd, SHUT_WR);
- (*pInfo->processBrokenLink)(pConn->handleId);
+ (*pInfo->processBrokenLink)(pConn->handleId, pConn->closedByApp);
pThread->numOfFds--;
epoll_ctl(pThread->pollFd, EPOLL_CTL_DEL, pConn->fd, NULL);
diff --git a/src/sync/test/syncClient.c b/src/sync/test/syncClient.c
index 303d2376effffa3a3b2dc01580352a43aeaac9d8..23ea54ee0c19b6ad2f93d7577d8d711874b10968 100644
--- a/src/sync/test/syncClient.c
+++ b/src/sync/test/syncClient.c
@@ -48,8 +48,6 @@ void *sendRequest(void *param) {
SInfo * pInfo = (SInfo *)param;
SRpcMsg rpcMsg = {0};
- setThreadName("sendCliReq");
-
uDebug("thread:%d, start to send request", pInfo->index);
while (pInfo->numOfReqs == 0 || pInfo->num < pInfo->numOfReqs) {
diff --git a/src/sync/test/syncServer.c b/src/sync/test/syncServer.c
index a3d06966488b95a561c9bc688b49f7ceceb87248..4598e16a9d05be29d11612755a079ce0a228a2ff 100644
--- a/src/sync/test/syncServer.c
+++ b/src/sync/test/syncServer.c
@@ -178,7 +178,7 @@ void *processWriteQueue(void *param) {
int type;
void *item;
- setThreadName("writeQ");
+ setThreadName("syncWrite");
while (1) {
int ret = taosReadQitem(qhandle, &type, &item);
diff --git a/src/tsdb/inc/tsdbBuffer.h b/src/tsdb/inc/tsdbBuffer.h
index 4e18ac711a159cd97fd6255c5be0e8aa6ff4abaf..ec6b057aef142fb938993b3a27717c5e64937258 100644
--- a/src/tsdb/inc/tsdbBuffer.h
+++ b/src/tsdb/inc/tsdbBuffer.h
@@ -40,7 +40,7 @@ void tsdbFreeBufPool(STsdbBufPool* pBufPool);
int tsdbOpenBufPool(STsdbRepo* pRepo);
void tsdbCloseBufPool(STsdbRepo* pRepo);
SListNode* tsdbAllocBufBlockFromPool(STsdbRepo* pRepo);
-int tsdbExpendPool(STsdbRepo* pRepo, int32_t oldTotalBlocks);
+int tsdbExpandPool(STsdbRepo* pRepo, int32_t oldTotalBlocks);
void tsdbRecycleBufferBlock(STsdbBufPool* pPool, SListNode *pNode);
-#endif /* _TD_TSDB_BUFFER_H_ */
\ No newline at end of file
+#endif /* _TD_TSDB_BUFFER_H_ */
diff --git a/src/tsdb/inc/tsdbReadImpl.h b/src/tsdb/inc/tsdbReadImpl.h
index e7840d9723f7935c69f70f1c236a5ed49c82c146..814c4d130599768e8237145559c47e50e64db4db 100644
--- a/src/tsdb/inc/tsdbReadImpl.h
+++ b/src/tsdb/inc/tsdbReadImpl.h
@@ -16,6 +16,13 @@
#ifndef _TD_TSDB_READ_IMPL_H_
#define _TD_TSDB_READ_IMPL_H_
+#include "tfs.h"
+#include "tsdb.h"
+#include "os.h"
+#include "tsdbFile.h"
+#include "tskiplist.h"
+#include "tsdbMeta.h"
+
typedef struct SReadH SReadH;
typedef struct {
@@ -143,4 +150,4 @@ static FORCE_INLINE int tsdbMakeRoom(void **ppBuf, size_t size) {
return 0;
}
-#endif /*_TD_TSDB_READ_IMPL_H_*/
\ No newline at end of file
+#endif /*_TD_TSDB_READ_IMPL_H_*/
diff --git a/src/tsdb/inc/tsdbRowMergeBuf.h b/src/tsdb/inc/tsdbRowMergeBuf.h
new file mode 100644
index 0000000000000000000000000000000000000000..302bf25750fc08367a2840fa9c483919c828fcb5
--- /dev/null
+++ b/src/tsdb/inc/tsdbRowMergeBuf.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TSDB_ROW_MERGE_BUF_H
+#define TSDB_ROW_MERGE_BUF_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "tsdb.h"
+#include "tchecksum.h"
+#include "tsdbReadImpl.h"
+
+typedef void* SMergeBuf;
+
+SDataRow tsdbMergeTwoRows(SMergeBuf *pBuf, SMemRow row1, SMemRow row2, STSchema *pSchema1, STSchema *pSchema2);
+
+static FORCE_INLINE int tsdbMergeBufMakeSureRoom(SMergeBuf *pBuf, STSchema* pSchema1, STSchema* pSchema2) {
+ return tsdbMakeRoom(pBuf, MAX(dataRowMaxBytesFromSchema(pSchema1), dataRowMaxBytesFromSchema(pSchema2)));
+}
+
+static FORCE_INLINE void tsdbFreeMergeBuf(SMergeBuf buf) {
+ taosTZfree(buf);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ifndef TSDB_ROW_MERGE_BUF_H */
diff --git a/src/tsdb/inc/tsdbint.h b/src/tsdb/inc/tsdbint.h
index dd43e393102c40d4b12a948a9b4d6d72f30f6dad..757a0951e8640656951904ffdcca1024cc27a800 100644
--- a/src/tsdb/inc/tsdbint.h
+++ b/src/tsdb/inc/tsdbint.h
@@ -68,6 +68,8 @@ extern "C" {
#include "tsdbCompact.h"
// Commit Queue
#include "tsdbCommitQueue.h"
+
+#include "tsdbRowMergeBuf.h"
// Main definitions
struct STsdbRepo {
uint8_t state;
@@ -92,6 +94,7 @@ struct STsdbRepo {
pthread_mutex_t mutex;
bool repoLocked;
int32_t code; // Commit code
+ SMergeBuf mergeBuf; //used when update=2
bool inCompact; // is in compact process?
};
@@ -139,4 +142,4 @@ static FORCE_INLINE int tsdbGetNextMaxTables(int tid) {
}
#endif
-#endif /* _TD_TSDB_INT_H_ */
\ No newline at end of file
+#endif /* _TD_TSDB_INT_H_ */
diff --git a/src/tsdb/src/tsdbBuffer.c b/src/tsdb/src/tsdbBuffer.c
index 429ea8e0ceb2c687b53ebd0a9d61d02d2a1f3686..e675bf6f9de04021112d43a1db70cf56cf430f08 100644
--- a/src/tsdb/src/tsdbBuffer.c
+++ b/src/tsdb/src/tsdbBuffer.c
@@ -159,7 +159,7 @@ _err:
static void tsdbFreeBufBlock(STsdbBufBlock *pBufBlock) { tfree(pBufBlock); }
-int tsdbExpendPool(STsdbRepo* pRepo, int32_t oldTotalBlocks) {
+int tsdbExpandPool(STsdbRepo* pRepo, int32_t oldTotalBlocks) {
if (oldTotalBlocks == pRepo->config.totalBlocks) {
return TSDB_CODE_SUCCESS;
}
@@ -199,4 +199,4 @@ void tsdbRecycleBufferBlock(STsdbBufPool* pPool, SListNode *pNode) {
tsdbFreeBufBlock(pBufBlock);
free(pNode);
pPool->nBufBlocks--;
-}
\ No newline at end of file
+}
diff --git a/src/tsdb/src/tsdbCommit.c b/src/tsdb/src/tsdbCommit.c
index 6330da60585afb590ee82018b3204280f62e5f1c..6c98283189b8a3e83ff888bfb9530bb85127c27d 100644
--- a/src/tsdb/src/tsdbCommit.c
+++ b/src/tsdb/src/tsdbCommit.c
@@ -1290,7 +1290,7 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt
ASSERT(pSchema != NULL);
}
- tdAppendMemRowToDataCol(row, pSchema, pTarget);
+ tdAppendMemRowToDataCol(row, pSchema, pTarget, true);
}
tSkipListIterNext(pCommitIter->pIter);
@@ -1302,7 +1302,7 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt
ASSERT(pSchema != NULL);
}
- tdAppendMemRowToDataCol(row, pSchema, pTarget);
+ tdAppendMemRowToDataCol(row, pSchema, pTarget, update == TD_ROW_OVERWRITE_UPDATE);
}
} else {
ASSERT(!isRowDel);
diff --git a/src/tsdb/src/tsdbCommitQueue.c b/src/tsdb/src/tsdbCommitQueue.c
index e45ac05e979b119ee7995cf845a6f242a9953cb3..59fb4f334d3006eb7e8807ce193d61905f2322d2 100644
--- a/src/tsdb/src/tsdbCommitQueue.c
+++ b/src/tsdb/src/tsdbCommitQueue.c
@@ -138,7 +138,7 @@ static void tsdbApplyRepoConfig(STsdbRepo *pRepo) {
pSaveCfg->compression, pSaveCfg->keep,pSaveCfg->keep1, pSaveCfg->keep2,
pSaveCfg->totalBlocks, oldCfg.cacheLastRow, pSaveCfg->cacheLastRow, oldTotalBlocks, pSaveCfg->totalBlocks);
- int err = tsdbExpendPool(pRepo, oldTotalBlocks);
+ int err = tsdbExpandPool(pRepo, oldTotalBlocks);
if (!TAOS_SUCCEEDED(err)) {
tsdbError("vgId:%d expand pool from %d to %d fail,reason:%s",
REPO_ID(pRepo), oldTotalBlocks, pSaveCfg->totalBlocks, tstrerror(err));
diff --git a/src/tsdb/src/tsdbCompact.c b/src/tsdb/src/tsdbCompact.c
index 5211ee3c611bb9a75ba02aa5fdf810bb52b1ab9d..0490f26b5ec649a420d0340990a8f49c14f9deb0 100644
--- a/src/tsdb/src/tsdbCompact.c
+++ b/src/tsdb/src/tsdbCompact.c
@@ -455,7 +455,7 @@ static int tsdbCompactMeta(STsdbRepo *pRepo) {
if (pReadh->pDCols[0]->numOfRows - ridx == 0) break;
int rowsToMerge = MIN(pReadh->pDCols[0]->numOfRows - ridx, defaultRows - pComph->pDataCols->numOfRows);
- tdMergeDataCols(pComph->pDataCols, pReadh->pDCols[0], rowsToMerge, &ridx);
+ tdMergeDataCols(pComph->pDataCols, pReadh->pDCols[0], rowsToMerge, &ridx, pCfg->update != TD_ROW_PARTIAL_UPDATE);
if (pComph->pDataCols->numOfRows < defaultRows) {
break;
diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c
index 8266a7c20fd8286afa11e4c3ddbbeed51b503ee6..c877bfc7af2620c354c3613977a6b9da7ca96825 100644
--- a/src/tsdb/src/tsdbMain.c
+++ b/src/tsdb/src/tsdbMain.c
@@ -14,6 +14,7 @@
*/
// no test file errors here
+#include "taosdef.h"
#include "tsdbint.h"
#define IS_VALID_PRECISION(precision) \
@@ -106,6 +107,8 @@ STsdbRepo *tsdbOpenRepo(STsdbCfg *pCfg, STsdbAppH *pAppH) {
return NULL;
}
+ pRepo->mergeBuf = NULL;
+
tsdbStartStream(pRepo);
tsdbDebug("vgId:%d, TSDB repository opened", REPO_ID(pRepo));
@@ -518,7 +521,8 @@ static int32_t tsdbCheckAndSetDefaultCfg(STsdbCfg *pCfg) {
}
// update check
- if (pCfg->update != 0) pCfg->update = 1;
+ if (pCfg->update < TD_ROW_DISCARD_UPDATE || pCfg->update > TD_ROW_PARTIAL_UPDATE)
+ pCfg->update = TD_ROW_DISCARD_UPDATE;
// update cacheLastRow
if (pCfg->cacheLastRow != 0) {
@@ -597,6 +601,7 @@ static void tsdbFreeRepo(STsdbRepo *pRepo) {
tsdbFreeFS(pRepo->fs);
tsdbFreeBufPool(pRepo->pPool);
tsdbFreeMeta(pRepo->tsdbMeta);
+ tsdbFreeMergeBuf(pRepo->mergeBuf);
// tsdbFreeMemTable(pRepo->mem);
// tsdbFreeMemTable(pRepo->imem);
tsem_destroy(&(pRepo->readyToCommit));
@@ -722,7 +727,8 @@ static int tsdbRestoreLastColumns(STsdbRepo *pRepo, STable *pTable, SReadH* pRea
// OK,let's load row from backward to get not-null column
for (int32_t rowId = pBlock->numOfRows - 1; rowId >= 0; rowId--) {
SDataCol *pDataCol = pReadh->pDCols[0]->cols + i;
- tdAppendColVal(memRowDataBody(row), tdGetColDataOfRow(pDataCol, rowId), pCol->type, pCol->offset);
+ const void* pColData = tdGetColDataOfRow(pDataCol, rowId);
+ tdAppendColVal(memRowDataBody(row), pColData, pCol->type, pCol->offset);
//SDataCol *pDataCol = readh.pDCols[0]->cols + j;
void *value = tdGetRowDataOfCol(memRowDataBody(row), (int8_t)pCol->type, TD_DATA_ROW_HEAD_SIZE + pCol->offset);
if (isNull(value, pCol->type)) {
@@ -735,11 +741,12 @@ static int tsdbRestoreLastColumns(STsdbRepo *pRepo, STable *pTable, SReadH* pRea
continue;
}
// save not-null column
+ uint16_t bytes = IS_VAR_DATA_TYPE(pCol->type) ? varDataTLen(pColData) : pCol->bytes;
SDataCol *pLastCol = &(pTable->lastCols[idx]);
- pLastCol->pData = malloc(pCol->bytes);
- pLastCol->bytes = pCol->bytes;
+ pLastCol->pData = malloc(bytes);
+ pLastCol->bytes = bytes;
pLastCol->colId = pCol->colId;
- memcpy(pLastCol->pData, value, pCol->bytes);
+ memcpy(pLastCol->pData, value, bytes);
// save row ts(in column 0)
pDataCol = pReadh->pDCols[0]->cols + 0;
@@ -991,4 +998,4 @@ int tsdbCacheLastData(STsdbRepo *pRepo, STsdbCfg* oldCfg) {
}
return 0;
-}
\ No newline at end of file
+}
diff --git a/src/tsdb/src/tsdbMemTable.c b/src/tsdb/src/tsdbMemTable.c
index 9ac5503e5f9dae8012c0ee76ec088c25cf43fb59..8bb2d1c44e17d7409c5e290adf3cf0e7d1e79528 100644
--- a/src/tsdb/src/tsdbMemTable.c
+++ b/src/tsdb/src/tsdbMemTable.c
@@ -13,7 +13,11 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include "tdataformat.h"
+#include "tfunctional.h"
#include "tsdbint.h"
+#include "tskiplist.h"
+#include "tsdbRowMergeBuf.h"
#define TSDB_DATA_SKIPLIST_LEVEL 5
#define TSDB_MAX_INSERT_BATCH 512
@@ -30,24 +34,22 @@ typedef struct {
void * pMsg;
} SSubmitMsgIter;
-static SMemTable * tsdbNewMemTable(STsdbRepo *pRepo);
-static void tsdbFreeMemTable(SMemTable *pMemTable);
-static STableData *tsdbNewTableData(STsdbCfg *pCfg, STable *pTable);
-static void tsdbFreeTableData(STableData *pTableData);
-static char * tsdbGetTsTupleKey(const void *data);
+static SMemTable * tsdbNewMemTable(STsdbRepo *pRepo);
+static void tsdbFreeMemTable(SMemTable *pMemTable);
+static STableData* tsdbNewTableData(STsdbCfg *pCfg, STable *pTable);
+static void tsdbFreeTableData(STableData *pTableData);
+static char * tsdbGetTsTupleKey(const void *data);
static int tsdbAdjustMemMaxTables(SMemTable *pMemTable, int maxTables);
-static int tsdbAppendTableRowToCols(STable *pTable, SDataCols *pCols, STSchema **ppSchema, SMemRow row);
+static int tsdbAppendTableRowToCols(STable *pTable, SDataCols *pCols, STSchema **ppSchema, SMemRow row);
static int tsdbInitSubmitBlkIter(SSubmitBlk *pBlock, SSubmitBlkIter *pIter);
-static SMemRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter);
+static SMemRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter);
static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg);
static int tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, int32_t *affectedrows);
-static int tsdbCopyRowToMem(STsdbRepo *pRepo, SMemRow row, STable *pTable, void **ppRow);
static int tsdbInitSubmitMsgIter(SSubmitMsg *pMsg, SSubmitMsgIter *pIter);
static int tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter, SSubmitBlk **pPBlock);
static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pTable);
-static int tsdbInsertDataToTableImpl(STsdbRepo *pRepo, STable *pTable, void **rows, int rowCounter);
-static void tsdbFreeRows(STsdbRepo *pRepo, void **rows, int rowCounter);
-static int tsdbUpdateTableLatestInfo(STsdbRepo *pRepo, STable *pTable, SMemRow row);
+static int tsdbUpdateTableLatestInfo(STsdbRepo *pRepo, STable *pTable, SMemRow row);
+
static FORCE_INLINE int tsdbCheckRowRange(STsdbRepo *pRepo, STable *pTable, SMemRow row, TSKEY minKey, TSKEY maxKey,
TSKEY now);
@@ -342,7 +344,7 @@ int tsdbSyncCommit(STsdbRepo *repo) {
* 3. rowsIncreased = rowsInserted - rowsDeleteSucceed >= maxRowsToRead
* 4. operations in pCols not exceeds its max capacity if pCols is given
*
- * The function tries to procceed AS MUSH AS POSSIBLE.
+ * The function tries to proceed AS MUCH AS POSSIBLE.
*/
int tsdbLoadDataFromCache(STable *pTable, SSkipListIterator *pIter, TSKEY maxKey, int maxRowsToRead, SDataCols *pCols,
TKEY *filterKeys, int nFilterKeys, bool keepDup, SMergeInfo *pMergeInfo) {
@@ -523,9 +525,15 @@ static STableData *tsdbNewTableData(STsdbCfg *pCfg, STable *pTable) {
pTableData->keyLast = 0;
pTableData->numOfRows = 0;
+ uint8_t skipListCreateFlags;
+ if(pCfg->update == TD_ROW_DISCARD_UPDATE)
+ skipListCreateFlags = SL_DISCARD_DUP_KEY;
+ else
+ skipListCreateFlags = SL_UPDATE_DUP_KEY;
+
pTableData->pData =
tSkipListCreate(TSDB_DATA_SKIPLIST_LEVEL, TSDB_DATA_TYPE_TIMESTAMP, TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP],
- tkeyComparFn, pCfg->update ? SL_UPDATE_DUP_KEY : SL_DISCARD_DUP_KEY, tsdbGetTsTupleKey);
+ tkeyComparFn, skipListCreateFlags, tsdbGetTsTupleKey);
if (pTableData->pData == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
free(pTableData);
@@ -581,7 +589,7 @@ static int tsdbAppendTableRowToCols(STable *pTable, SDataCols *pCols, STSchema *
}
}
- tdAppendMemRowToDataCol(row, *ppSchema, pCols);
+ tdAppendMemRowToDataCol(row, *ppSchema, pCols, true);
}
return 0;
@@ -693,95 +701,162 @@ static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg) {
return 0;
}
-static int tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, int32_t *affectedrows) {
- STsdbMeta * pMeta = pRepo->tsdbMeta;
- int64_t points = 0;
- STable * pTable = NULL;
- SSubmitBlkIter blkIter = {0};
- SMemRow row = NULL;
- void * rows[TSDB_MAX_INSERT_BATCH] = {0};
- int rowCounter = 0;
+//row1 has higher priority
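+// Skiplist duplicate-key callback: merges two rows carrying the same timestamp (row1 takes
+// precedence), copies the result into the vnode buffer pool via tsdbAllocBytes, updates the
+// affected-row/point counters, and reports the last written row through pLastRow.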
+static SMemRow tsdbInsertDupKeyMerge(SMemRow row1, SMemRow row2, STsdbRepo* pRepo, STSchema **ppSchema1, STSchema **ppSchema2, STable* pTable, int32_t* pAffectedRows, int64_t* pPoints, SMemRow* pLastRow) {
+
+  // for compatibility, a duplicate key inserted when update=0 should also be counted as an affected row!
+ if(row1 == NULL && row2 == NULL && pRepo->config.update == TD_ROW_DISCARD_UPDATE) {
+ (*pAffectedRows)++;
+ (*pPoints)++;
+ return NULL;
+ }
- ASSERT(pBlock->tid < pMeta->maxTables);
- pTable = pMeta->tables[pBlock->tid];
- ASSERT(pTable != NULL && TABLE_UID(pTable) == pBlock->uid);
+ if(row2 == NULL || pRepo->config.update != TD_ROW_PARTIAL_UPDATE) {
+ void* pMem = tsdbAllocBytes(pRepo, memRowTLen(row1));
+ if(pMem == NULL) return NULL;
+ memRowCpy(pMem, row1);
+ (*pAffectedRows)++;
+ (*pPoints)++;
+ *pLastRow = pMem;
+ return pMem;
+ }
- tsdbInitSubmitBlkIter(pBlock, &blkIter);
- while ((row = tsdbGetSubmitBlkNext(&blkIter)) != NULL) {
- if (tsdbCopyRowToMem(pRepo, row, pTable, &(rows[rowCounter])) < 0) {
- tsdbFreeRows(pRepo, rows, rowCounter);
- goto _err;
+ STSchema *pSchema1 = *ppSchema1;
+ STSchema *pSchema2 = *ppSchema2;
+ SMergeBuf * pBuf = &pRepo->mergeBuf;
+ int dv1 = memRowVersion(row1);
+ int dv2 = memRowVersion(row2);
+ if(pSchema1 == NULL || schemaVersion(pSchema1) != dv1) {
+ if(pSchema2 != NULL && schemaVersion(pSchema2) == dv1) {
+ *ppSchema1 = pSchema2;
+ } else {
+ *ppSchema1 = tsdbGetTableSchemaImpl(pTable, false, false, memRowVersion(row1));
}
+ pSchema1 = *ppSchema1;
+ }
- (*affectedrows)++;
- points++;
-
- if (rows[rowCounter] != NULL) {
- rowCounter++;
+ if(pSchema2 == NULL || schemaVersion(pSchema2) != dv2) {
+ if(schemaVersion(pSchema1) == dv2) {
+ pSchema2 = pSchema1;
+ } else {
+ *ppSchema2 = tsdbGetTableSchemaImpl(pTable, false, false, memRowVersion(row2));
+ pSchema2 = *ppSchema2;
}
+ }
- if (rowCounter == TSDB_MAX_INSERT_BATCH) {
- if (tsdbInsertDataToTableImpl(pRepo, pTable, rows, rowCounter) < 0) {
- goto _err;
- }
+ SMemRow tmp = tsdbMergeTwoRows(pBuf, row1, row2, pSchema1, pSchema2);
- rowCounter = 0;
- memset(rows, 0, sizeof(rows));
- }
- }
+ void* pMem = tsdbAllocBytes(pRepo, memRowTLen(tmp));
+ if(pMem == NULL) return NULL;
+ memRowCpy(pMem, tmp);
- if (rowCounter > 0 && tsdbInsertDataToTableImpl(pRepo, pTable, rows, rowCounter) < 0) {
- goto _err;
- }
+ (*pAffectedRows)++;
+ (*pPoints)++;
- STSchema *pSchema = tsdbGetTableSchemaByVersion(pTable, pBlock->sversion);
- pRepo->stat.pointsWritten += points * schemaNCols(pSchema);
- pRepo->stat.totalStorage += points * schemaVLen(pSchema);
+ *pLastRow = pMem;
+ return pMem;
+}
- return 0;
+static void* tsdbInsertDupKeyMergePacked(void** args) {
+ return tsdbInsertDupKeyMerge(args[0], args[1], args[2], (STSchema**)&args[3], (STSchema**)&args[4], args[5], args[6], args[7], args[8]);
+}
-_err:
- return -1;
+static void tsdbSetupSkipListHookFns(SSkipList* pSkipList, STsdbRepo *pRepo, STable *pTable, int32_t* pAffectedRows, int64_t* pPoints, SMemRow* pLastRow) {
+
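+  // Install the duplicate-key merge handler on this table's skiplist once; slots 0 and 1 of the
+  // saved-args array are left empty here and are expected to receive the two conflicting rows
+  // when the skiplist invokes the handler.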
+ if(pSkipList->insertHandleFn == NULL) {
+ tGenericSavedFunc *dupHandleSavedFunc = genericSavedFuncInit((GenericVaFunc)&tsdbInsertDupKeyMergePacked, 9);
+ dupHandleSavedFunc->args[2] = pRepo;
+ dupHandleSavedFunc->args[3] = NULL;
+ dupHandleSavedFunc->args[4] = NULL;
+ dupHandleSavedFunc->args[5] = pTable;
+ dupHandleSavedFunc->args[6] = pAffectedRows;
+ dupHandleSavedFunc->args[7] = pPoints;
+ dupHandleSavedFunc->args[8] = pLastRow;
+ pSkipList->insertHandleFn = dupHandleSavedFunc;
+ }
}
-static int tsdbCopyRowToMem(STsdbRepo *pRepo, SMemRow row, STable *pTable, void **ppRow) {
- STsdbCfg * pCfg = &pRepo->config;
- TKEY tkey = memRowTKey(row);
- TSKEY key = memRowKey(row);
- bool isRowDelete = TKEY_IS_DELETED(tkey);
+static int tsdbInsertDataToTable(STsdbRepo* pRepo, SSubmitBlk* pBlock, int32_t *pAffectedRows) {
- if (isRowDelete) {
- if (!pCfg->update) {
- tsdbWarn("vgId:%d vnode is not allowed to update but try to delete a data row", REPO_ID(pRepo));
- terrno = TSDB_CODE_TDB_INVALID_ACTION;
+ STsdbMeta *pMeta = pRepo->tsdbMeta;
+ int64_t points = 0;
+ STable *pTable = NULL;
+ SSubmitBlkIter blkIter = {0};
+ SMemTable *pMemTable = NULL;
+ STableData *pTableData = NULL;
+ STsdbCfg *pCfg = &(pRepo->config);
+
+ tsdbInitSubmitBlkIter(pBlock, &blkIter);
+ if(blkIter.row == NULL) return 0;
+ TSKEY firstRowKey = memRowKey(blkIter.row);
+
+ tsdbAllocBytes(pRepo, 0);
+ pMemTable = pRepo->mem;
+
+ ASSERT(pMemTable != NULL);
+ ASSERT(pBlock->tid < pMeta->maxTables);
+
+ pTable = pMeta->tables[pBlock->tid];
+
+ ASSERT(pTable != NULL && TABLE_UID(pTable) == pBlock->uid);
+
+
+ if (TABLE_TID(pTable) >= pMemTable->maxTables) {
+ if (tsdbAdjustMemMaxTables(pMemTable, pMeta->maxTables) < 0) {
return -1;
}
+ }
+ pTableData = pMemTable->tData[TABLE_TID(pTable)];
+
+ if (pTableData == NULL || pTableData->uid != TABLE_UID(pTable)) {
+ if (pTableData != NULL) {
+ taosWLockLatch(&(pMemTable->latch));
+ pMemTable->tData[TABLE_TID(pTable)] = NULL;
+ tsdbFreeTableData(pTableData);
+ taosWUnLockLatch(&(pMemTable->latch));
+ }
- TSKEY lastKey = tsdbGetTableLastKeyImpl(pTable);
- if (key > lastKey) {
- tsdbTrace("vgId:%d skip to delete row key %" PRId64 " which is larger than table lastKey %" PRId64,
- REPO_ID(pRepo), key, lastKey);
- return 0;
+ pTableData = tsdbNewTableData(pCfg, pTable);
+ if (pTableData == NULL) {
+ tsdbError("vgId:%d failed to insert data to table %s uid %" PRId64 " tid %d since %s", REPO_ID(pRepo),
+ TABLE_CHAR_NAME(pTable), TABLE_UID(pTable), TABLE_TID(pTable), tstrerror(terrno));
+ return -1;
}
- }
- void *pRow = tsdbAllocBytes(pRepo, memRowTLen(row));
- if (pRow == NULL) {
- tsdbError("vgId:%d failed to insert row with key %" PRId64 " to table %s while allocate %" PRIu32 " bytes since %s",
- REPO_ID(pRepo), key, TABLE_CHAR_NAME(pTable), memRowTLen(row), tstrerror(terrno));
- return -1;
+ pRepo->mem->tData[TABLE_TID(pTable)] = pTableData;
}
- memRowCpy(pRow, row);
- ppRow[0] = pRow; // save the memory address of data rows
+ ASSERT((pTableData != NULL) && pTableData->uid == TABLE_UID(pTable));
+
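+  // Rows from the submit block are inserted straight into the table's skiplist through
+  // tSkipListPutBatchByIter; duplicate timestamps are resolved by the merge hook set up below.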
+ SMemRow lastRow = NULL;
+ int64_t osize = SL_SIZE(pTableData->pData);
+ tsdbSetupSkipListHookFns(pTableData->pData, pRepo, pTable, pAffectedRows, &points, &lastRow);
+ tSkipListPutBatchByIter(pTableData->pData, &blkIter, (iter_next_fn_t)tsdbGetSubmitBlkNext);
+ int64_t dsize = SL_SIZE(pTableData->pData) - osize;
+
+
+ if(lastRow != NULL) {
+ TSKEY lastRowKey = memRowKey(lastRow);
+ if (pMemTable->keyFirst > firstRowKey) pMemTable->keyFirst = firstRowKey;
+ pMemTable->numOfRows += dsize;
- tsdbTrace("vgId:%d a row is %s table %s tid %d uid %" PRIu64 " key %" PRIu64, REPO_ID(pRepo),
- isRowDelete ? "deleted from" : "updated in", TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable),
- key);
+ if (pTableData->keyFirst > firstRowKey) pTableData->keyFirst = firstRowKey;
+ pTableData->numOfRows += dsize;
+ if (pMemTable->keyLast < lastRowKey) pMemTable->keyLast = lastRowKey;
+ if (pTableData->keyLast < lastRowKey) pTableData->keyLast = lastRowKey;
+ if (tsdbUpdateTableLatestInfo(pRepo, pTable, lastRow) < 0) {
+ return -1;
+ }
+ }
+
+ STSchema *pSchema = tsdbGetTableSchemaByVersion(pTable, pBlock->sversion);
+ pRepo->stat.pointsWritten += points * schemaNCols(pSchema);
+ pRepo->stat.totalStorage += points * schemaVLen(pSchema);
return 0;
}
+
static int tsdbInitSubmitMsgIter(SSubmitMsg *pMsg, SSubmitMsgIter *pIter) {
if (pMsg == NULL) {
terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP;
@@ -889,106 +964,6 @@ static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pT
return 0;
}
-static int tsdbInsertDataToTableImpl(STsdbRepo *pRepo, STable *pTable, void **rows, int rowCounter) {
- if (rowCounter < 1) return 0;
-
- SMemTable * pMemTable = NULL;
- STableData *pTableData = NULL;
- STsdbMeta * pMeta = pRepo->tsdbMeta;
- STsdbCfg * pCfg = &(pRepo->config);
-
- ASSERT(pRepo->mem != NULL);
- pMemTable = pRepo->mem;
-
- if (TABLE_TID(pTable) >= pMemTable->maxTables) {
- if (tsdbAdjustMemMaxTables(pMemTable, pMeta->maxTables) < 0) {
- tsdbFreeRows(pRepo, rows, rowCounter);
- return -1;
- }
- }
- pTableData = pMemTable->tData[TABLE_TID(pTable)];
-
- if (pTableData == NULL || pTableData->uid != TABLE_UID(pTable)) {
- if (pTableData != NULL) {
- taosWLockLatch(&(pMemTable->latch));
- pMemTable->tData[TABLE_TID(pTable)] = NULL;
- tsdbFreeTableData(pTableData);
- taosWUnLockLatch(&(pMemTable->latch));
- }
-
- pTableData = tsdbNewTableData(pCfg, pTable);
- if (pTableData == NULL) {
- tsdbError("vgId:%d failed to insert data to table %s uid %" PRId64 " tid %d since %s", REPO_ID(pRepo),
- TABLE_CHAR_NAME(pTable), TABLE_UID(pTable), TABLE_TID(pTable), tstrerror(terrno));
- tsdbFreeRows(pRepo, rows, rowCounter);
- return -1;
- }
-
- pRepo->mem->tData[TABLE_TID(pTable)] = pTableData;
- }
-
- ASSERT((pTableData != NULL) && pTableData->uid == TABLE_UID(pTable));
-
- int64_t osize = SL_SIZE(pTableData->pData);
- tSkipListPutBatch(pTableData->pData, rows, rowCounter);
- int64_t dsize = SL_SIZE(pTableData->pData) - osize;
- TSKEY keyFirstRow = memRowKey(rows[0]);
- TSKEY keyLastRow = memRowKey(rows[rowCounter - 1]);
-
- if (pMemTable->keyFirst > keyFirstRow) pMemTable->keyFirst = keyFirstRow;
- if (pMemTable->keyLast < keyLastRow) pMemTable->keyLast = keyLastRow;
- pMemTable->numOfRows += dsize;
-
- if (pTableData->keyFirst > keyFirstRow) pTableData->keyFirst = keyFirstRow;
- if (pTableData->keyLast < keyLastRow) pTableData->keyLast = keyLastRow;
- pTableData->numOfRows += dsize;
-
- // update table latest info
- if (tsdbUpdateTableLatestInfo(pRepo, pTable, rows[rowCounter - 1]) < 0) {
- return -1;
- }
-
- return 0;
-}
-
-static void tsdbFreeRows(STsdbRepo *pRepo, void **rows, int rowCounter) {
- ASSERT(pRepo->mem != NULL);
- STsdbBufPool *pBufPool = pRepo->pPool;
-
- for (int i = rowCounter - 1; i >= 0; --i) {
- SMemRow row = (SMemRow)rows[i];
- int bytes = (int)memRowTLen(row);
-
- if (pRepo->mem->extraBuffList == NULL) {
- STsdbBufBlock *pBufBlock = tsdbGetCurrBufBlock(pRepo);
- ASSERT(pBufBlock != NULL && pBufBlock->offset >= bytes);
-
- pBufBlock->offset -= bytes;
- pBufBlock->remain += bytes;
- ASSERT(row == POINTER_SHIFT(pBufBlock->data, pBufBlock->offset));
- tsdbTrace("vgId:%d free %d bytes to TSDB buffer pool, nBlocks %d offset %d remain %d", REPO_ID(pRepo), bytes,
- listNEles(pRepo->mem->bufBlockList), pBufBlock->offset, pBufBlock->remain);
-
- if (pBufBlock->offset == 0) { // return the block to buffer pool
- if (tsdbLockRepo(pRepo) < 0) return;
- SListNode *pNode = tdListPopTail(pRepo->mem->bufBlockList);
- tdListPrependNode(pBufPool->bufBlockList, pNode);
- if (tsdbUnlockRepo(pRepo) < 0) return;
- }
- } else {
- ASSERT(listNEles(pRepo->mem->extraBuffList) > 0);
- SListNode *pNode = tdListPopTail(pRepo->mem->extraBuffList);
- ASSERT(row == pNode->data);
- free(pNode);
- tsdbTrace("vgId:%d free %d bytes to SYSTEM buffer pool", REPO_ID(pRepo), bytes);
-
- if (listNEles(pRepo->mem->extraBuffList) == 0) {
- tdListFree(pRepo->mem->extraBuffList);
- pRepo->mem->extraBuffList = NULL;
- }
- }
- }
-}
static void updateTableLatestColumn(STsdbRepo *pRepo, STable *pTable, SMemRow row) {
tsdbDebug("vgId:%d updateTableLatestColumn, %s row version:%d", REPO_ID(pRepo), pTable->name->data,
@@ -1005,8 +980,8 @@ static void updateTableLatestColumn(STsdbRepo *pRepo, STable *pTable, SMemRow ro
}
SDataCol *pLatestCols = pTable->lastCols;
+ int32_t kvIdx = 0;
- bool isDataRow = isDataRow(row);
for (int16_t j = 0; j < schemaNCols(pSchema); j++) {
STColumn *pTCol = schemaColAt(pSchema, j);
// ignore not exist colId
@@ -1017,16 +992,8 @@ static void updateTableLatestColumn(STsdbRepo *pRepo, STable *pTable, SMemRow ro
void *value = NULL;
- if (isDataRow) {
- value = tdGetRowDataOfCol(memRowDataBody(row), (int8_t)pTCol->type,
- TD_DATA_ROW_HEAD_SIZE + pSchema->columns[j].offset);
- } else {
- // SKVRow
- SColIdx *pColIdx = tdGetKVRowIdxOfCol(memRowKvBody(row), pTCol->colId);
- if (pColIdx) {
- value = tdGetKvRowDataOfCol(memRowKvBody(row), pColIdx->offset);
- }
- }
+ value = tdGetMemRowDataOfColEx(row, pTCol->colId, (int8_t)pTCol->type,
+ TD_DATA_ROW_HEAD_SIZE + pSchema->columns[j].offset, &kvIdx);
if ((value == NULL) || isNull(value, pTCol->type)) {
continue;
@@ -1034,14 +1001,17 @@ static void updateTableLatestColumn(STsdbRepo *pRepo, STable *pTable, SMemRow ro
SDataCol *pDataCol = &(pLatestCols[idx]);
if (pDataCol->pData == NULL) {
- pDataCol->pData = malloc(pSchema->columns[j].bytes);
- pDataCol->bytes = pSchema->columns[j].bytes;
- } else if (pDataCol->bytes < pSchema->columns[j].bytes) {
- pDataCol->pData = realloc(pDataCol->pData, pSchema->columns[j].bytes);
- pDataCol->bytes = pSchema->columns[j].bytes;
+ pDataCol->pData = malloc(pTCol->bytes);
+ pDataCol->bytes = pTCol->bytes;
+ } else if (pDataCol->bytes < pTCol->bytes) {
+ pDataCol->pData = realloc(pDataCol->pData, pTCol->bytes);
+ pDataCol->bytes = pTCol->bytes;
}
-
- memcpy(pDataCol->pData, value, pDataCol->bytes);
+ // the actual value size
+ uint16_t bytes = IS_VAR_DATA_TYPE(pTCol->type) ? varDataTLen(value) : pTCol->bytes;
+      // the actual data size CANNOT be larger than the column size
+ assert(pTCol->bytes >= bytes);
+ memcpy(pDataCol->pData, value, bytes);
//tsdbInfo("updateTableLatestColumn vgId:%d cache column %d for %d,%s", REPO_ID(pRepo), j, pDataCol->bytes, (char*)pDataCol->pData);
pDataCol->ts = memRowKey(row);
}
@@ -1052,13 +1022,14 @@ static int tsdbUpdateTableLatestInfo(STsdbRepo *pRepo, STable *pTable, SMemRow r
// if cacheLastRow config has been reset, free the lastRow
if (!pCfg->cacheLastRow && pTable->lastRow != NULL) {
- taosTZfree(pTable->lastRow);
+ SMemRow cachedLastRow = pTable->lastRow;
TSDB_WLOCK_TABLE(pTable);
pTable->lastRow = NULL;
TSDB_WUNLOCK_TABLE(pTable);
+ taosTZfree(cachedLastRow);
}
- if (tsdbGetTableLastKeyImpl(pTable) < memRowKey(row)) {
+ if (tsdbGetTableLastKeyImpl(pTable) <= memRowKey(row)) {
if (CACHE_LAST_ROW(pCfg) || pTable->lastRow != NULL) {
SMemRow nrow = pTable->lastRow;
if (taosTSizeof(nrow) < memRowTLen(row)) {
diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c
index 0957a777e33eebdec7d5db11d84e93f2873a46e7..5b93847c9b5ead1b65d08290768b53472402e0a6 100644
--- a/src/tsdb/src/tsdbRead.c
+++ b/src/tsdb/src/tsdbRead.c
@@ -14,11 +14,14 @@
*/
#include "os.h"
+#include "tdataformat.h"
+#include "tskiplist.h"
#include "tulog.h"
#include "talgo.h"
#include "tcompare.h"
#include "exception.h"
+#include "taosdef.h"
#include "tlosertree.h"
#include "tsdbint.h"
#include "texpr.h"
@@ -68,6 +71,12 @@ typedef struct SLoadCompBlockInfo {
int32_t fileId;
} SLoadCompBlockInfo;
+enum {
+ CHECKINFO_CHOSEN_MEM = 0,
+ CHECKINFO_CHOSEN_IMEM = 1,
+  CHECKINFO_CHOSEN_BOTH = 2   // for update=2 (merge case)
+};
+
typedef struct STableCheckInfo {
STableId tableId;
@@ -76,7 +85,7 @@ typedef struct STableCheckInfo {
SBlockInfo* pCompInfo;
int32_t compSize;
int32_t numOfBlocks:29; // number of qualified data blocks not the original blocks
- int8_t chosen:2; // indicate which iterator should move forward
+ uint8_t chosen:2; // indicate which iterator should move forward
bool initBuf; // whether to initialize the in-memory skip list iterator or not
SSkipListIterator* iter; // mem buffer skip list iterator
SSkipListIterator* iiter; // imem buffer skip list iterator
@@ -477,6 +486,10 @@ static STsdbQueryHandle* tsdbQueryTablesImpl(STsdbRepo* tsdb, STsdbQueryCond* pC
TsdbQueryHandleT* tsdbQueryTables(STsdbRepo* tsdb, STsdbQueryCond* pCond, STableGroupInfo* groupList, uint64_t qId, SMemRef* pRef) {
STsdbQueryHandle* pQueryHandle = tsdbQueryTablesImpl(tsdb, pCond, qId, pRef);
+ if (pQueryHandle == NULL) {
+ return NULL;
+ }
+
if (emptyQueryTimewindow(pQueryHandle)) {
return (TsdbQueryHandleT*) pQueryHandle;
}
@@ -587,6 +600,10 @@ TsdbQueryHandleT tsdbQueryLastRow(STsdbRepo *tsdb, STsdbQueryCond *pCond, STable
}
STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, groupList, qId, pMemRef);
+ if (pQueryHandle == NULL) {
+ return NULL;
+ }
+
int32_t code = checkForCachedLastRow(pQueryHandle, groupList);
if (code != TSDB_CODE_SUCCESS) { // set the numOfTables to be 0
terrno = code;
@@ -604,6 +621,10 @@ TsdbQueryHandleT tsdbQueryLastRow(STsdbRepo *tsdb, STsdbQueryCond *pCond, STable
TsdbQueryHandleT tsdbQueryCacheLast(STsdbRepo *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupList, uint64_t qId, SMemRef* pMemRef) {
STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, groupList, qId, pMemRef);
+ if (pQueryHandle == NULL) {
+ return NULL;
+ }
+
int32_t code = checkForCachedLast(pQueryHandle);
if (code != TSDB_CODE_SUCCESS) { // set the numOfTables to be 0
terrno = code;
@@ -640,7 +661,7 @@ static STableGroupInfo* trimTableGroup(STimeWindow* window, STableGroupInfo* pGr
size_t numOfGroup = taosArrayGetSize(pGroupList->pGroupList);
STableGroupInfo* pNew = calloc(1, sizeof(STableGroupInfo));
- pNew->pGroupList = taosArrayInit(numOfGroup, sizeof(SArray));
+ pNew->pGroupList = taosArrayInit(numOfGroup, POINTER_BYTES);
for(int32_t i = 0; i < numOfGroup; ++i) {
SArray* oneGroup = taosArrayGetP(pGroupList->pGroupList, i);
@@ -781,7 +802,62 @@ static void destroyTableMemIterator(STableCheckInfo* pCheckInfo) {
tSkipListDestroyIter(pCheckInfo->iiter);
}
-static SMemRow getSDataRowInTableMem(STableCheckInfo* pCheckInfo, int32_t order, int32_t update) {
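+// Peek the key of the next row visible through the mem/imem iterators without copying the row,
+// and record in pCheckInfo->chosen which iterator should be consumed; when both sides hold the
+// same key, the side discarded by the update mode is advanced immediately, or both are kept for
+// the merge (partial update) case.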
+static TSKEY extractFirstTraverseKey(STableCheckInfo* pCheckInfo, int32_t order, int32_t update) {
+ SMemRow rmem = NULL, rimem = NULL;
+ if (pCheckInfo->iter) {
+ SSkipListNode* node = tSkipListIterGet(pCheckInfo->iter);
+ if (node != NULL) {
+ rmem = (SMemRow)SL_GET_NODE_DATA(node);
+ }
+ }
+
+ if (pCheckInfo->iiter) {
+ SSkipListNode* node = tSkipListIterGet(pCheckInfo->iiter);
+ if (node != NULL) {
+ rimem = (SMemRow)SL_GET_NODE_DATA(node);
+ }
+ }
+
+ if (rmem == NULL && rimem == NULL) {
+ return TSKEY_INITIAL_VAL;
+ }
+
+ if (rmem != NULL && rimem == NULL) {
+ pCheckInfo->chosen = CHECKINFO_CHOSEN_MEM;
+ return memRowKey(rmem);
+ }
+
+ if (rmem == NULL && rimem != NULL) {
+ pCheckInfo->chosen = CHECKINFO_CHOSEN_IMEM;
+ return memRowKey(rimem);
+ }
+
+ TSKEY r1 = memRowKey(rmem);
+ TSKEY r2 = memRowKey(rimem);
+
+ if (r1 == r2) {
+ if(update == TD_ROW_DISCARD_UPDATE){
+ pCheckInfo->chosen = CHECKINFO_CHOSEN_IMEM;
+ tSkipListIterNext(pCheckInfo->iter);
+ }
+ else if(update == TD_ROW_OVERWRITE_UPDATE) {
+ pCheckInfo->chosen = CHECKINFO_CHOSEN_MEM;
+ tSkipListIterNext(pCheckInfo->iiter);
+ } else {
+ pCheckInfo->chosen = CHECKINFO_CHOSEN_BOTH;
+ }
+ return r1;
+ } else if (r1 < r2 && ASCENDING_TRAVERSE(order)) {
+ pCheckInfo->chosen = CHECKINFO_CHOSEN_MEM;
+ return r1;
+ }
+ else {
+ pCheckInfo->chosen = CHECKINFO_CHOSEN_IMEM;
+ return r2;
+ }
+}
+
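+// Return the row to be consumed next from the mem/imem iterators; when both sides hold the same
+// timestamp under partial update, the imem row is additionally handed back through extraRow so
+// that the caller can merge the two copies.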
+static SMemRow getSMemRowInTableMem(STableCheckInfo* pCheckInfo, int32_t order, int32_t update, SMemRow* extraRow) {
SMemRow rmem = NULL, rimem = NULL;
if (pCheckInfo->iter) {
SSkipListNode* node = tSkipListIterGet(pCheckInfo->iter);
@@ -814,31 +890,35 @@ static SMemRow getSDataRowInTableMem(STableCheckInfo* pCheckInfo, int32_t order,
TSKEY r1 = memRowKey(rmem);
TSKEY r2 = memRowKey(rimem);
- if (r1 == r2) { // data ts are duplicated, ignore the data in mem
- if (!update) {
+ if (r1 == r2) {
+ if (update == TD_ROW_DISCARD_UPDATE) {
tSkipListIterNext(pCheckInfo->iter);
- pCheckInfo->chosen = 1;
+ pCheckInfo->chosen = CHECKINFO_CHOSEN_IMEM;
return rimem;
- } else {
+ } else if(update == TD_ROW_OVERWRITE_UPDATE){
tSkipListIterNext(pCheckInfo->iiter);
- pCheckInfo->chosen = 0;
+ pCheckInfo->chosen = CHECKINFO_CHOSEN_MEM;
+ return rmem;
+ } else {
+ pCheckInfo->chosen = CHECKINFO_CHOSEN_BOTH;
+        if (extraRow) *extraRow = rimem;  // hand back the imem row so the caller can merge both copies
return rmem;
}
} else {
if (ASCENDING_TRAVERSE(order)) {
if (r1 < r2) {
- pCheckInfo->chosen = 0;
+ pCheckInfo->chosen = CHECKINFO_CHOSEN_MEM;
return rmem;
} else {
- pCheckInfo->chosen = 1;
+ pCheckInfo->chosen = CHECKINFO_CHOSEN_IMEM;
return rimem;
}
} else {
if (r1 < r2) {
- pCheckInfo->chosen = 1;
+ pCheckInfo->chosen = CHECKINFO_CHOSEN_IMEM;
return rimem;
} else {
- pCheckInfo->chosen = 0;
+            pCheckInfo->chosen = CHECKINFO_CHOSEN_MEM;
return rmem;
}
}
@@ -847,7 +927,7 @@ static SMemRow getSDataRowInTableMem(STableCheckInfo* pCheckInfo, int32_t order,
static bool moveToNextRowInMem(STableCheckInfo* pCheckInfo) {
bool hasNext = false;
- if (pCheckInfo->chosen == 0) {
+ if (pCheckInfo->chosen == CHECKINFO_CHOSEN_MEM) {
if (pCheckInfo->iter != NULL) {
hasNext = tSkipListIterNext(pCheckInfo->iter);
}
@@ -859,7 +939,7 @@ static bool moveToNextRowInMem(STableCheckInfo* pCheckInfo) {
if (pCheckInfo->iiter != NULL) {
return tSkipListIterGet(pCheckInfo->iiter) != NULL;
}
- } else { //pCheckInfo->chosen == 1
+ } else if (pCheckInfo->chosen == CHECKINFO_CHOSEN_IMEM){
if (pCheckInfo->iiter != NULL) {
hasNext = tSkipListIterNext(pCheckInfo->iiter);
}
@@ -871,6 +951,13 @@ static bool moveToNextRowInMem(STableCheckInfo* pCheckInfo) {
if (pCheckInfo->iter != NULL) {
return tSkipListIterGet(pCheckInfo->iter) != NULL;
}
+ } else {
+ if (pCheckInfo->iter != NULL) {
+ hasNext = tSkipListIterNext(pCheckInfo->iter);
+ }
+ if (pCheckInfo->iiter != NULL) {
+ hasNext = tSkipListIterNext(pCheckInfo->iiter) || hasNext;
+ }
}
return hasNext;
@@ -891,7 +978,7 @@ static bool hasMoreDataInCache(STsdbQueryHandle* pHandle) {
initTableMemIterator(pHandle, pCheckInfo);
}
- SMemRow row = getSDataRowInTableMem(pCheckInfo, pHandle->order, pCfg->update);
+ SMemRow row = getSMemRowInTableMem(pCheckInfo, pHandle->order, pCfg->update, NULL);
if (row == NULL) {
return false;
}
@@ -1147,25 +1234,28 @@ static int32_t doCopyRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, int32_t c
static void moveDataToFront(STsdbQueryHandle* pQueryHandle, int32_t numOfRows, int32_t numOfCols);
static void doCheckGeneratedBlockRange(STsdbQueryHandle* pQueryHandle);
static void copyAllRemainRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, STableCheckInfo* pCheckInfo, SDataBlockInfo* pBlockInfo, int32_t endPos);
+static TSKEY extractFirstTraverseKey(STableCheckInfo* pCheckInfo, int32_t order, int32_t update);
static int32_t handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SBlock* pBlock, STableCheckInfo* pCheckInfo){
SQueryFilePos* cur = &pQueryHandle->cur;
STsdbCfg* pCfg = &pQueryHandle->pTsdb->config;
SDataBlockInfo binfo = GET_FILE_DATA_BLOCK_INFO(pCheckInfo, pBlock);
+ TSKEY key;
int32_t code = TSDB_CODE_SUCCESS;
/*bool hasData = */ initTableMemIterator(pQueryHandle, pCheckInfo);
- SMemRow row = getSDataRowInTableMem(pCheckInfo, pQueryHandle->order, pCfg->update);
assert(cur->pos >= 0 && cur->pos <= binfo.rows);
- TSKEY key = (row != NULL) ? memRowKey(row) : TSKEY_INITIAL_VAL;
+ key = extractFirstTraverseKey(pCheckInfo, pQueryHandle->order, pCfg->update);
+
if (key != TSKEY_INITIAL_VAL) {
tsdbDebug("%p key in mem:%"PRId64", 0x%"PRIx64, pQueryHandle, key, pQueryHandle->qId);
} else {
tsdbDebug("%p no data in mem, 0x%"PRIx64, pQueryHandle, pQueryHandle->qId);
}
+
if ((ASCENDING_TRAVERSE(pQueryHandle->order) && (key != TSKEY_INITIAL_VAL && key <= binfo.window.ekey)) ||
(!ASCENDING_TRAVERSE(pQueryHandle->order) && (key != TSKEY_INITIAL_VAL && key >= binfo.window.skey))) {
@@ -1190,6 +1280,7 @@ static int32_t handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SBlock* p
return code;
}
+
// return error, add test cases
if ((code = doLoadFileDataBlock(pQueryHandle, pBlock, pCheckInfo, cur->slot)) != TSDB_CODE_SUCCESS) {
return code;
@@ -1452,40 +1543,125 @@ int32_t doCopyRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, int32_t capacity
return numOfRows + num;
}
-static void copyOneRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity, int32_t numOfRows, SMemRow row,
- int32_t numOfCols, STable* pTable, STSchema* pSchema) {
+// Note: row1 always has higher priority
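+// Merge the columns of row1/row2 (each either a data row or a KV row) into the query output
+// buffer at position numOfRows. When forceSetNull is false, NULL or missing column values leave
+// whatever has already been copied into the output (e.g. from the file block) untouched.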
+static void mergeTwoRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity, int32_t numOfRows,
+ SMemRow row1, SMemRow row2, int32_t numOfCols, STable* pTable,
+ STSchema* pSchema1, STSchema* pSchema2, bool forceSetNull) {
char* pData = NULL;
+ STSchema* pSchema;
+ SMemRow row;
+ int16_t colId;
+ int16_t offset;
+
+ bool isRow1DataRow = isDataRow(row1);
+ bool isRow2DataRow;
+ bool isChosenRowDataRow;
+ int32_t chosen_itr;
+ void *value;
- // the schema version info is embedded in SDataRow, and use latest schema version for SKVRow
- int32_t numOfRowCols = 0;
- if (pSchema == NULL) {
- pSchema = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row));
- numOfRowCols = schemaNCols(pSchema);
+  // the schema version info is embedded in SDataRow
+ int32_t numOfColsOfRow1 = 0;
+
+ if (pSchema1 == NULL) {
+ pSchema1 = tsdbGetTableSchemaByVersion(pTable, dataRowVersion(row1));
+ }
+ if(isRow1DataRow) {
+ numOfColsOfRow1 = schemaNCols(pSchema1);
} else {
- numOfRowCols = schemaNCols(pSchema);
+ numOfColsOfRow1 = kvRowNCols(memRowKvBody(row1));
}
- int32_t i = 0;
+ int32_t numOfColsOfRow2 = 0;
+ if(row2) {
+ isRow2DataRow = isDataRow(row2);
+ if (pSchema2 == NULL) {
+ pSchema2 = tsdbGetTableSchemaByVersion(pTable, dataRowVersion(row2));
+ }
+ if(isRow2DataRow) {
+ numOfColsOfRow2 = schemaNCols(pSchema2);
+ } else {
+ numOfColsOfRow2 = kvRowNCols(memRowKvBody(row2));
+ }
+ }
- if (isDataRow(row)) {
- SDataRow dataRow = memRowDataBody(row);
- int32_t j = 0;
- while (i < numOfCols && j < numOfRowCols) {
- SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, i);
- if (pSchema->columns[j].colId < pColInfo->info.colId) {
+
+ int32_t i = 0, j = 0, k = 0;
+ while(i < numOfCols && (j < numOfColsOfRow1 || k < numOfColsOfRow2)) {
+ SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, i);
+
+ if (ASCENDING_TRAVERSE(pQueryHandle->order)) {
+ pData = (char*)pColInfo->pData + numOfRows * pColInfo->info.bytes;
+ } else {
+ pData = (char*)pColInfo->pData + (capacity - numOfRows - 1) * pColInfo->info.bytes;
+ }
+
+ int32_t colIdOfRow1;
+ if(j >= numOfColsOfRow1) {
+ colIdOfRow1 = INT32_MAX;
+ } else if(isRow1DataRow) {
+ colIdOfRow1 = pSchema1->columns[j].colId;
+ } else {
+ void *rowBody = memRowKvBody(row1);
+ SColIdx *pColIdx = kvRowColIdxAt(rowBody, j);
+ colIdOfRow1 = pColIdx->colId;
+ }
+
+ int32_t colIdOfRow2;
+ if(k >= numOfColsOfRow2) {
+ colIdOfRow2 = INT32_MAX;
+ } else if(isRow2DataRow) {
+ colIdOfRow2 = pSchema2->columns[k].colId;
+ } else {
+ void *rowBody = memRowKvBody(row2);
+ SColIdx *pColIdx = kvRowColIdxAt(rowBody, k);
+ colIdOfRow2 = pColIdx->colId;
+ }
+
+ if(colIdOfRow1 == colIdOfRow2) {
+ if(colIdOfRow1 < pColInfo->info.colId) {
j++;
+ k++;
continue;
}
-
- if (ASCENDING_TRAVERSE(pQueryHandle->order)) {
- pData = (char*)pColInfo->pData + numOfRows * pColInfo->info.bytes;
- } else {
- pData = (char*)pColInfo->pData + (capacity - numOfRows - 1) * pColInfo->info.bytes;
+ row = row1;
+ pSchema = pSchema1;
+ isChosenRowDataRow = isRow1DataRow;
+ chosen_itr = j;
+ } else if(colIdOfRow1 < colIdOfRow2) {
+ if(colIdOfRow1 < pColInfo->info.colId) {
+ j++;
+ continue;
+ }
+ row = row1;
+ pSchema = pSchema1;
+ isChosenRowDataRow = isRow1DataRow;
+ chosen_itr = j;
+ } else {
+ if(colIdOfRow2 < pColInfo->info.colId) {
+ k++;
+ continue;
}
+ row = row2;
+ pSchema = pSchema2;
+ chosen_itr = k;
+ isChosenRowDataRow = isRow2DataRow;
+ }
+ if(isChosenRowDataRow) {
+ colId = pSchema->columns[chosen_itr].colId;
+ offset = pSchema->columns[chosen_itr].offset;
+ void *rowBody = memRowDataBody(row);
+ value = tdGetRowDataOfCol(rowBody, (int8_t)pColInfo->info.type, TD_DATA_ROW_HEAD_SIZE + offset);
+ } else {
+ void *rowBody = memRowKvBody(row);
+ SColIdx *pColIdx = kvRowColIdxAt(rowBody, chosen_itr);
+ colId = pColIdx->colId;
+ offset = pColIdx->offset;
+ value = tdGetKvRowDataOfCol(rowBody, pColIdx->offset);
+ }
+
- if (pSchema->columns[j].colId == pColInfo->info.colId) {
- void* value =
- tdGetRowDataOfCol(dataRow, (int8_t)pColInfo->info.type, TD_DATA_ROW_HEAD_SIZE + pSchema->columns[j].offset);
+ if (colId == pColInfo->info.colId) {
+ if(forceSetNull || (!isNull(value, (int8_t)pColInfo->info.type))) {
switch (pColInfo->info.type) {
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
@@ -1495,19 +1671,19 @@ static void copyOneRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity,
case TSDB_DATA_TYPE_BOOL:
case TSDB_DATA_TYPE_TINYINT:
case TSDB_DATA_TYPE_UTINYINT:
- *(uint8_t*)pData = *(uint8_t*)value;
+ *(uint8_t *)pData = *(uint8_t *)value;
break;
case TSDB_DATA_TYPE_SMALLINT:
case TSDB_DATA_TYPE_USMALLINT:
- *(uint16_t*)pData = *(uint16_t*)value;
+ *(uint16_t *)pData = *(uint16_t *)value;
break;
case TSDB_DATA_TYPE_INT:
case TSDB_DATA_TYPE_UINT:
- *(uint32_t*)pData = *(uint32_t*)value;
+ *(uint32_t *)pData = *(uint32_t *)value;
break;
case TSDB_DATA_TYPE_BIGINT:
case TSDB_DATA_TYPE_UBIGINT:
- *(uint64_t*)pData = *(uint64_t*)value;
+ *(uint64_t *)pData = *(uint64_t *)value;
break;
case TSDB_DATA_TYPE_FLOAT:
SET_FLOAT_PTR(pData, value);
@@ -1517,121 +1693,54 @@ static void copyOneRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity,
break;
case TSDB_DATA_TYPE_TIMESTAMP:
if (pColInfo->info.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
- *(TSKEY*)pData = tdGetKey(*(TKEY*)value);
+ *(TSKEY *)pData = tdGetKey(*(TKEY *)value);
} else {
- *(TSKEY*)pData = *(TSKEY*)value;
+ *(TSKEY *)pData = *(TSKEY *)value;
}
break;
default:
memcpy(pData, value, pColInfo->info.bytes);
}
+ }
+ i++;
+ if(row == row1) {
j++;
- i++;
- } else { // pColInfo->info.colId < pSchema->columns[j].colId, it is a NULL data
+ } else {
+ k++;
+ }
+ } else {
+ if(forceSetNull) {
if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) {
setVardataNull(pData, pColInfo->info.type);
} else {
setNull(pData, pColInfo->info.type, pColInfo->info.bytes);
}
- i++;
}
+ i++;
}
- } else if (isKvRow(row)) {
- SKVRow kvRow = memRowKvBody(row);
- int32_t k = 0;
- int32_t nKvRowCols = kvRowNCols(kvRow);
+ }
- while (i < numOfCols && k < nKvRowCols) {
+ if(forceSetNull) {
+ while (i < numOfCols) { // the remain columns are all null data
SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, i);
- SColIdx* pColIdx = kvRowColIdxAt(kvRow, k);
-
- if (pColIdx->colId < pColInfo->info.colId) {
- ++k;
- continue;
- }
-
if (ASCENDING_TRAVERSE(pQueryHandle->order)) {
pData = (char*)pColInfo->pData + numOfRows * pColInfo->info.bytes;
} else {
pData = (char*)pColInfo->pData + (capacity - numOfRows - 1) * pColInfo->info.bytes;
}
- if (pColIdx->colId == pColInfo->info.colId) {
- // offset of pColIdx for SKVRow including the TD_KV_ROW_HEAD_SIZE
- void* value = tdGetKvRowDataOfCol(kvRow, pColIdx->offset);
- switch (pColInfo->info.type) {
- case TSDB_DATA_TYPE_BINARY:
- case TSDB_DATA_TYPE_NCHAR:
- memcpy(pData, value, varDataTLen(value));
- break;
- case TSDB_DATA_TYPE_NULL:
- case TSDB_DATA_TYPE_BOOL:
- case TSDB_DATA_TYPE_TINYINT:
- case TSDB_DATA_TYPE_UTINYINT:
- *(uint8_t*)pData = *(uint8_t*)value;
- break;
- case TSDB_DATA_TYPE_SMALLINT:
- case TSDB_DATA_TYPE_USMALLINT:
- *(uint16_t*)pData = *(uint16_t*)value;
- break;
- case TSDB_DATA_TYPE_INT:
- case TSDB_DATA_TYPE_UINT:
- *(uint32_t*)pData = *(uint32_t*)value;
- break;
- case TSDB_DATA_TYPE_BIGINT:
- case TSDB_DATA_TYPE_UBIGINT:
- *(uint64_t*)pData = *(uint64_t*)value;
- break;
- case TSDB_DATA_TYPE_FLOAT:
- SET_FLOAT_PTR(pData, value);
- break;
- case TSDB_DATA_TYPE_DOUBLE:
- SET_DOUBLE_PTR(pData, value);
- break;
- case TSDB_DATA_TYPE_TIMESTAMP:
- if (pColInfo->info.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
- *(TSKEY*)pData = tdGetKey(*(TKEY*)value);
- } else {
- *(TSKEY*)pData = *(TSKEY*)value;
- }
- break;
- default:
- memcpy(pData, value, pColInfo->info.bytes);
- }
- ++k;
- ++i;
- continue;
- }
- // If (pColInfo->info.colId < pColIdx->colId), it is NULL data
if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) {
setVardataNull(pData, pColInfo->info.type);
} else {
setNull(pData, pColInfo->info.type, pColInfo->info.bytes);
}
- ++i;
- }
- } else {
- ASSERT(0);
- }
-
- while (i < numOfCols) { // the remain columns are all null data
- SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, i);
- if (ASCENDING_TRAVERSE(pQueryHandle->order)) {
- pData = (char*)pColInfo->pData + numOfRows * pColInfo->info.bytes;
- } else {
- pData = (char*)pColInfo->pData + (capacity - numOfRows - 1) * pColInfo->info.bytes;
- }
- if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) {
- setVardataNull(pData, pColInfo->info.type);
- } else {
- setNull(pData, pColInfo->info.type, pColInfo->info.bytes);
+ i++;
}
-
- i++;
}
}
+
static void moveDataToFront(STsdbQueryHandle* pQueryHandle, int32_t numOfRows, int32_t numOfCols) {
if (numOfRows == 0 || ASCENDING_TRAVERSE(pQueryHandle->order)) {
return;
@@ -1798,8 +1907,10 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
// compared with the data from in-memory buffer, to generate the correct timestamp array list
int32_t numOfRows = 0;
- int16_t rv = -1;
- STSchema* pSchema = NULL;
+ int16_t rv1 = -1;
+ int16_t rv2 = -1;
+ STSchema* pSchema1 = NULL;
+ STSchema* pSchema2 = NULL;
int32_t pos = cur->pos;
cur->win = TSWINDOW_INITIALIZER;
@@ -1811,12 +1922,13 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
} else if (pCheckInfo->iter != NULL || pCheckInfo->iiter != NULL) {
SSkipListNode* node = NULL;
do {
- SMemRow row = getSDataRowInTableMem(pCheckInfo, pQueryHandle->order, pCfg->update);
- if (row == NULL) {
+ SMemRow row2 = NULL;
+ SMemRow row1 = getSMemRowInTableMem(pCheckInfo, pQueryHandle->order, pCfg->update, &row2);
+ if (row1 == NULL) {
break;
}
- TSKEY key = memRowKey(row);
+ TSKEY key = memRowKey(row1);
if ((key > pQueryHandle->window.ekey && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
(key < pQueryHandle->window.ekey && !ASCENDING_TRAVERSE(pQueryHandle->order))) {
break;
@@ -1829,12 +1941,16 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
if ((key < tsArray[pos] && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
(key > tsArray[pos] && !ASCENDING_TRAVERSE(pQueryHandle->order))) {
- if (rv != memRowVersion(row)) {
- pSchema = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row));
- rv = memRowVersion(row);
+ if (rv1 != memRowVersion(row1)) {
+ pSchema1 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row1));
+ rv1 = memRowVersion(row1);
}
-
- copyOneRowFromMem(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, row, numOfCols, pTable, pSchema);
+ if(row2 && rv2 != memRowVersion(row2)) {
+ pSchema2 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row2));
+ rv2 = memRowVersion(row2);
+ }
+
+ mergeTwoRowFromMem(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, row1, row2, numOfCols, pTable, pSchema1, pSchema2, true);
numOfRows += 1;
if (cur->win.skey == TSKEY_INITIAL_VAL) {
cur->win.skey = key;
@@ -1847,12 +1963,20 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
moveToNextRowInMem(pCheckInfo);
} else if (key == tsArray[pos]) { // data in buffer has the same timestamp of data in file block, ignore it
if (pCfg->update) {
- if (rv != memRowVersion(row)) {
- pSchema = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row));
- rv = memRowVersion(row);
+ if(pCfg->update == TD_ROW_PARTIAL_UPDATE) {
+ doCopyRowsFromFileBlock(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, pos, pos);
}
-
- copyOneRowFromMem(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, row, numOfCols, pTable, pSchema);
+ if (rv1 != memRowVersion(row1)) {
+ pSchema1 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row1));
+ rv1 = memRowVersion(row1);
+ }
+ if(row2 && rv2 != memRowVersion(row2)) {
+ pSchema2 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row2));
+ rv2 = memRowVersion(row2);
+ }
+
+ bool forceSetNull = pCfg->update != TD_ROW_PARTIAL_UPDATE;
+ mergeTwoRowFromMem(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, row1, row2, numOfCols, pTable, pSchema1, pSchema2, forceSetNull);
numOfRows += 1;
if (cur->win.skey == TSKEY_INITIAL_VAL) {
cur->win.skey = key;
@@ -1877,7 +2001,7 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
assert(end != -1);
if (tsArray[end] == key) { // the value of key in cache equals to the end timestamp value, ignore it
- if (!pCfg->update) {
+ if (pCfg->update == TD_ROW_DISCARD_UPDATE) {
moveToNextRowInMem(pCheckInfo);
} else {
end -= step;
@@ -2311,6 +2435,7 @@ int32_t tsdbGetFileBlocksDistInfo(TsdbQueryHandleT* queryHandle, STableBlockDist
int32_t code = TSDB_CODE_SUCCESS;
int32_t numOfBlocks = 0;
int32_t numOfTables = (int32_t)taosArrayGetSize(pQueryHandle->pTableCheckInfo);
+ int defaultRows = TSDB_DEFAULT_BLOCK_ROWS(pCfg->maxRowsPerFileBlock);
STimeWindow win = TSWINDOW_INITIALIZER;
while (true) {
@@ -2370,6 +2495,7 @@ int32_t tsdbGetFileBlocksDistInfo(TsdbQueryHandleT* queryHandle, STableBlockDist
pTableBlockInfo->totalRows += numOfRows;
if (numOfRows > pTableBlockInfo->maxRows) pTableBlockInfo->maxRows = numOfRows;
if (numOfRows < pTableBlockInfo->minRows) pTableBlockInfo->minRows = numOfRows;
+ if (numOfRows < defaultRows) pTableBlockInfo->numOfSmallBlocks+=1;
int32_t stepIndex = (numOfRows-1)/TSDB_BLOCK_DIST_STEP_ROWS;
SFileBlockInfo *blockInfo = (SFileBlockInfo*)taosArrayGet(pTableBlockInfo->dataBlockInfos, stepIndex);
blockInfo->numBlocksOfStep++;
@@ -2490,7 +2616,7 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int
STSchema* pSchema = NULL;
do {
- SMemRow row = getSDataRowInTableMem(pCheckInfo, pQueryHandle->order, pCfg->update);
+ SMemRow row = getSMemRowInTableMem(pCheckInfo, pQueryHandle->order, pCfg->update, NULL);
if (row == NULL) {
break;
}
@@ -2512,7 +2638,7 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int
pSchema = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row));
rv = memRowVersion(row);
}
- copyOneRowFromMem(pQueryHandle, maxRowsToRead, numOfRows, row, numOfCols, pTable, pSchema);
+ mergeTwoRowFromMem(pQueryHandle, maxRowsToRead, numOfRows, row, NULL, numOfCols, pTable, pSchema, NULL, true);
if (++numOfRows >= maxRowsToRead) {
moveToNextRowInMem(pCheckInfo);
@@ -2637,7 +2763,7 @@ static bool loadCachedLastRow(STsdbQueryHandle* pQueryHandle) {
if (ret != TSDB_CODE_SUCCESS) {
return false;
}
- copyOneRowFromMem(pQueryHandle, pQueryHandle->outputCapacity, 0, pRow, numOfCols, pCheckInfo->pTableObj, NULL);
+ mergeTwoRowFromMem(pQueryHandle, pQueryHandle->outputCapacity, 0, pRow, NULL, numOfCols, pCheckInfo->pTableObj, NULL, NULL, true);
tfree(pRow);
// update the last key value
@@ -2920,7 +3046,7 @@ static int32_t doGetExternalRow(STsdbQueryHandle* pQueryHandle, int16_t type, SM
}
}
- SArray* row = (type == TSDB_PREV_ROW)? pQueryHandle->prev:pQueryHandle->next;
+ SArray* row = (type == TSDB_PREV_ROW)? pQueryHandle->prev : pQueryHandle->next;
for (int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData* pCol = taosArrayGet(pQueryHandle->pColumns, i);
@@ -3383,11 +3509,13 @@ static int32_t tableGroupComparFn(const void *p1, const void *p2, const void *pa
type = TSDB_DATA_TYPE_BINARY;
bytes = tGetTbnameColumnSchema()->bytes;
} else {
- STColumn* pCol = schemaColAt(pTableGroupSupp->pTagSchema, colIndex);
- bytes = pCol->bytes;
- type = pCol->type;
- f1 = tdGetKVRowValOfCol(pTable1->tagVal, pCol->colId);
- f2 = tdGetKVRowValOfCol(pTable2->tagVal, pCol->colId);
+ if (pTableGroupSupp->pTagSchema && colIndex < pTableGroupSupp->pTagSchema->numOfCols) {
+ STColumn* pCol = schemaColAt(pTableGroupSupp->pTagSchema, colIndex);
+ bytes = pCol->bytes;
+ type = pCol->type;
+ f1 = tdGetKVRowValOfCol(pTable1->tagVal, pCol->colId);
+ f2 = tdGetKVRowValOfCol(pTable2->tagVal, pCol->colId);
+ }
}
// this tags value may be NULL
@@ -3785,10 +3913,6 @@ static void* doFreeColumnInfoData(SArray* pColumnInfoData) {
}
static void* destroyTableCheckInfo(SArray* pTableCheckInfo) {
- if (pTableCheckInfo == NULL) {
- return NULL;
- }
-
size_t size = taosArrayGetSize(pTableCheckInfo);
for (int32_t i = 0; i < size; ++i) {
STableCheckInfo* p = taosArrayGet(pTableCheckInfo, i);
@@ -3832,6 +3956,7 @@ void tsdbCleanupQueryHandle(TsdbQueryHandleT queryHandle) {
pQueryHandle->next = doFreeColumnInfoData(pQueryHandle->next);
SIOCostSummary* pCost = &pQueryHandle->cost;
+
tsdbDebug("%p :io-cost summary: head-file read cnt:%"PRIu64", head-file time:%"PRIu64" us, statis-info:%"PRId64" us, datablock:%" PRId64" us, check data:%"PRId64" us, 0x%"PRIx64,
pQueryHandle, pCost->headFileLoad, pCost->headFileLoadTime, pCost->statisInfoLoadTime, pCost->blockLoadTime, pCost->checkForNextTime, pQueryHandle->qId);
diff --git a/src/tsdb/src/tsdbReadImpl.c b/src/tsdb/src/tsdbReadImpl.c
index dd14dc700f85c3126411cf4613651463745f71d5..666a2d357144431093855479f30a4feefbb9ab3b 100644
--- a/src/tsdb/src/tsdbReadImpl.c
+++ b/src/tsdb/src/tsdbReadImpl.c
@@ -244,6 +244,7 @@ int tsdbLoadBlockInfo(SReadH *pReadh, void *pTarget) {
int tsdbLoadBlockData(SReadH *pReadh, SBlock *pBlock, SBlockInfo *pBlkInfo) {
ASSERT(pBlock->numOfSubBlocks > 0);
+ int8_t update = pReadh->pRepo->config.update;
SBlock *iBlock = pBlock;
if (pBlock->numOfSubBlocks > 1) {
@@ -258,7 +259,7 @@ int tsdbLoadBlockData(SReadH *pReadh, SBlock *pBlock, SBlockInfo *pBlkInfo) {
for (int i = 1; i < pBlock->numOfSubBlocks; i++) {
iBlock++;
if (tsdbLoadBlockDataImpl(pReadh, iBlock, pReadh->pDCols[1]) < 0) return -1;
- if (tdMergeDataCols(pReadh->pDCols[0], pReadh->pDCols[1], pReadh->pDCols[1]->numOfRows, NULL) < 0) return -1;
+ if (tdMergeDataCols(pReadh->pDCols[0], pReadh->pDCols[1], pReadh->pDCols[1]->numOfRows, NULL, update != TD_ROW_PARTIAL_UPDATE) < 0) return -1;
}
ASSERT(pReadh->pDCols[0]->numOfRows == pBlock->numOfRows);
@@ -270,6 +271,7 @@ int tsdbLoadBlockData(SReadH *pReadh, SBlock *pBlock, SBlockInfo *pBlkInfo) {
int tsdbLoadBlockDataCols(SReadH *pReadh, SBlock *pBlock, SBlockInfo *pBlkInfo, int16_t *colIds, int numOfColsIds) {
ASSERT(pBlock->numOfSubBlocks > 0);
+ int8_t update = pReadh->pRepo->config.update;
SBlock *iBlock = pBlock;
if (pBlock->numOfSubBlocks > 1) {
@@ -284,7 +286,7 @@ int tsdbLoadBlockDataCols(SReadH *pReadh, SBlock *pBlock, SBlockInfo *pBlkInfo,
for (int i = 1; i < pBlock->numOfSubBlocks; i++) {
iBlock++;
if (tsdbLoadBlockDataColsImpl(pReadh, iBlock, pReadh->pDCols[1], colIds, numOfColsIds) < 0) return -1;
- if (tdMergeDataCols(pReadh->pDCols[0], pReadh->pDCols[1], pReadh->pDCols[1]->numOfRows, NULL) < 0) return -1;
+ if (tdMergeDataCols(pReadh->pDCols[0], pReadh->pDCols[1], pReadh->pDCols[1]->numOfRows, NULL, update != TD_ROW_PARTIAL_UPDATE) < 0) return -1;
}
ASSERT(pReadh->pDCols[0]->numOfRows == pBlock->numOfRows);
@@ -657,4 +659,4 @@ static int tsdbLoadColData(SReadH *pReadh, SDFile *pDFile, SBlock *pBlock, SBloc
}
return 0;
-}
\ No newline at end of file
+}
diff --git a/src/tsdb/src/tsdbRowMergeBuf.c b/src/tsdb/src/tsdbRowMergeBuf.c
new file mode 100644
index 0000000000000000000000000000000000000000..5ce580f70f257ae2a0c00865b5bf54fec0f2b14f
--- /dev/null
+++ b/src/tsdb/src/tsdbRowMergeBuf.c
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "tsdbRowMergeBuf.h"
+#include "tdataformat.h"
+
+// row1 has higher priority
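+// Merge two rows sharing the same timestamp into the repo-level merge buffer, growing the buffer
+// when needed; if either input row is missing, the other one is returned unchanged.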
+SMemRow tsdbMergeTwoRows(SMergeBuf *pBuf, SMemRow row1, SMemRow row2, STSchema *pSchema1, STSchema *pSchema2) {
+ if(row2 == NULL) return row1;
+ if(row1 == NULL) return row2;
+ ASSERT(pSchema1->version == memRowVersion(row1));
+ ASSERT(pSchema2->version == memRowVersion(row2));
+
+ if(tsdbMergeBufMakeSureRoom(pBuf, pSchema1, pSchema2) < 0) {
+ return NULL;
+ }
+ return mergeTwoMemRows(*pBuf, row1, row2, pSchema1, pSchema2);
+}
diff --git a/src/util/inc/hash.h b/src/util/inc/hash.h
index 616b844c1388575130a2b1c02033cfedb7ef9e57..a53aa602c1e309ef0c25e370ae084b5a33e4144c 100644
--- a/src/util/inc/hash.h
+++ b/src/util/inc/hash.h
@@ -123,10 +123,9 @@ void *taosHashGet(SHashObj *pHashObj, const void *key, size_t keyLen);
* @param keyLen
* @param fp
* @param d
- * @param dsize
* @return
*/
-void* taosHashGetClone(SHashObj *pHashObj, const void *key, size_t keyLen, void (*fp)(void *), void* d, size_t dsize);
+void* taosHashGetClone(SHashObj *pHashObj, const void *key, size_t keyLen, void (*fp)(void *), void* d);
/**
* remove item with the specified key
diff --git a/src/util/inc/tcache.h b/src/util/inc/tcache.h
index efd51f90ce8739050971856dd4f2dbdd1c44d5a4..e41b544d00e55f7eece904c5957ef9c06063e6c3 100644
--- a/src/util/inc/tcache.h
+++ b/src/util/inc/tcache.h
@@ -83,6 +83,7 @@ typedef struct {
uint8_t deleting; // set the deleting flag to stop refreshing ASAP.
pthread_t refreshWorker;
bool extendLifespan; // auto extend life span when one item is accessed.
+  int64_t checkTick;         // number of refresh-thread ticks between two scans of this cache object
#if defined(LINUX)
pthread_rwlock_t lock;
#else
@@ -177,6 +178,11 @@ void taosCacheCleanup(SCacheObj *pCacheObj);
*/
void taosCacheRefresh(SCacheObj *pCacheObj, __cache_free_fn_t fp);
+/**
+ * stop background refresh worker thread
+ */
+void taosStopCacheRefreshWorker();
+
#ifdef __cplusplus
}
#endif
diff --git a/src/util/inc/tfunctional.h b/src/util/inc/tfunctional.h
new file mode 100644
index 0000000000000000000000000000000000000000..70f54e921da3166645bd43f4e3fe58bc144fe714
--- /dev/null
+++ b/src/util/inc/tfunctional.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef TD_TFUNCTIONAL_H
+#define TD_TFUNCTIONAL_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "os.h"
+
+//TODO: hard to use, trying to rewrite it using va_list
+
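+// A "saved function" bundles a function pointer with a flexible array of argument slots so that
+// a call can be captured once (e.g. as a skiplist duplicate-key handler) and invoked later.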
+typedef void* (*GenericVaFunc)(void* args[]);
+typedef int32_t (*I32VaFunc) (void* args[]);
+typedef void (*VoidVaFunc) (void* args[]);
+
+typedef struct GenericSavedFunc {
+ GenericVaFunc func;
+ void * args[];
+} tGenericSavedFunc;
+
+typedef struct I32SavedFunc {
+ I32VaFunc func;
+ void * args[];
+} tI32SavedFunc;
+
+typedef struct VoidSavedFunc {
+ VoidVaFunc func;
+ void * args[];
+} tVoidSavedFunc;
+
+tGenericSavedFunc* genericSavedFuncInit(GenericVaFunc func, int numOfArgs);
+tI32SavedFunc* i32SavedFuncInit(I32VaFunc func, int numOfArgs);
+tVoidSavedFunc* voidSavedFuncInit(VoidVaFunc func, int numOfArgs);
+void* genericInvoke(tGenericSavedFunc* const pSavedFunc);
+int32_t i32Invoke(tI32SavedFunc* const pSavedFunc);
+void voidInvoke(tVoidSavedFunc* const pSavedFunc);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/util/inc/tlist.h b/src/util/inc/tlist.h
index 6c96ec0b138cfc229ab4a8bde3f3b374bce49dfd..758190454030a7287c3ca95418e7a8fea3618cc5 100644
--- a/src/util/inc/tlist.h
+++ b/src/util/inc/tlist.h
@@ -44,7 +44,7 @@ typedef struct {
#define listNEles(l) (l)->numOfEles
#define listEleSize(l) (l)->eleSize
#define isListEmpty(l) ((l)->numOfEles == 0)
-#define listNodeFree(n) free(n);
+#define listNodeFree(n) free(n)
SList * tdListNew(int eleSize);
void * tdListFree(SList *list);
diff --git a/src/util/inc/tskiplist.h b/src/util/inc/tskiplist.h
index 17f5940b4998c8a5b5e277e153cf7485c3cd6129..d9dc001ccd072e9803d7241bd7e0e3e029a77b3e 100644
--- a/src/util/inc/tskiplist.h
+++ b/src/util/inc/tskiplist.h
@@ -23,6 +23,7 @@ extern "C" {
#include "os.h"
#include "taosdef.h"
#include "tarray.h"
+#include "tfunctional.h"
#define MAX_SKIP_LIST_LEVEL 15
#define SKIP_LIST_RECORD_PERFORMANCE 0
@@ -30,13 +31,17 @@ extern "C" {
// For key property setting
#define SL_ALLOW_DUP_KEY (uint8_t)0x0 // Allow duplicate key exists (for tag index usage)
#define SL_DISCARD_DUP_KEY (uint8_t)0x1 // Discard duplicate key (for data update=0 case)
-#define SL_UPDATE_DUP_KEY (uint8_t)0x2 // Update duplicate key by remove/insert (for data update=1 case)
+#define SL_UPDATE_DUP_KEY (uint8_t)0x2 // Update duplicate key by remove/insert (for data update!=0 case)
+
// For thread safety setting
#define SL_THREAD_SAFE (uint8_t)0x4
typedef char *SSkipListKey;
typedef char *(*__sl_key_fn_t)(const void *);
+typedef void (*sl_patch_row_fn_t)(void * pDst, const void * pSrc);
+typedef void* (*iter_next_fn_t)(void *iter);
+
typedef struct SSkipListNode {
uint8_t level;
void * pData;
@@ -95,6 +100,12 @@ typedef struct tSkipListState {
uint64_t nTotalElapsedTimeForInsert;
} tSkipListState;
+typedef enum {
+ SSkipListPutSuccess = 0,
+ SSkipListPutEarlyStop = 1,
+ SSkipListPutSkipOne = 2
+} SSkipListPutStatus;
+
typedef struct SSkipList {
unsigned int seed;
__compar_fn_t comparFn;
@@ -111,6 +122,7 @@ typedef struct SSkipList {
#if SKIP_LIST_RECORD_PERFORMANCE
tSkipListState state; // skiplist state
#endif
+ tGenericSavedFunc* insertHandleFn;
} SSkipList;
typedef struct SSkipListIterator {
@@ -118,7 +130,7 @@ typedef struct SSkipListIterator {
SSkipListNode *cur;
int32_t step; // the number of nodes that have been checked already
int32_t order; // order of the iterator
- SSkipListNode *next; // next points to the true qualified node in skip list
+ SSkipListNode *next; // next points to the true qualified node in skiplist
} SSkipListIterator;
#define SL_IS_THREAD_SAFE(s) (((s)->flags) & SL_THREAD_SAFE)
@@ -132,7 +144,7 @@ SSkipList *tSkipListCreate(uint8_t maxLevel, uint8_t keyType, uint16_t keyLen, _
__sl_key_fn_t fn);
void tSkipListDestroy(SSkipList *pSkipList);
SSkipListNode * tSkipListPut(SSkipList *pSkipList, void *pData);
-void tSkipListPutBatch(SSkipList *pSkipList, void **ppData, int ndata);
+void tSkipListPutBatchByIter(SSkipList *pSkipList, void *iter, iter_next_fn_t iterate);
SArray * tSkipListGet(SSkipList *pSkipList, SSkipListKey pKey);
void tSkipListPrint(SSkipList *pSkipList, int16_t nlevel);
SSkipListIterator *tSkipListCreateIter(SSkipList *pSkipList);
diff --git a/src/util/src/hash.c b/src/util/src/hash.c
index d7bee9b67cad8fe91a182d76a443c04fd82be44c..2e18f36a17f9b3112c4d1747afa37944a1e93d28 100644
--- a/src/util/src/hash.c
+++ b/src/util/src/hash.c
@@ -294,10 +294,10 @@ int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, void *da
}
void *taosHashGet(SHashObj *pHashObj, const void *key, size_t keyLen) {
- return taosHashGetClone(pHashObj, key, keyLen, NULL, NULL, 0);
+ return taosHashGetClone(pHashObj, key, keyLen, NULL, NULL);
}
-void* taosHashGetClone(SHashObj *pHashObj, const void *key, size_t keyLen, void (*fp)(void *), void* d, size_t dsize) {
+void* taosHashGetClone(SHashObj *pHashObj, const void *key, size_t keyLen, void (*fp)(void *), void* d) {
if (taosHashTableEmpty(pHashObj) || keyLen == 0 || key == NULL) {
return NULL;
}
diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c
index c6ee79e101d4b79aacce553ccb8e7e171f23aef6..69b3741e13c9e0b3ee00615a29851a3f690a1e84 100644
--- a/src/util/src/tcache.c
+++ b/src/util/src/tcache.c
@@ -54,6 +54,45 @@ static FORCE_INLINE void __cache_lock_destroy(SCacheObj *pCacheObj) {
#endif
}
+/**
+ * clean up the taos cache object
+ * @param pCacheObj
+ */
+static void doCleanupDataCache(SCacheObj *pCacheObj);
+
+/**
+ * refresh the cache every pCacheObj->refreshTime, removing nodes whose refcount == 0 from both the hash list and the trashcan
+ * @param handle Cache object handle
+ */
+static void* taosCacheTimedRefresh(void *handle);
+
+static pthread_t cacheRefreshWorker = {0};
+static pthread_once_t cacheThreadInit = PTHREAD_ONCE_INIT;
+static pthread_mutex_t guard = PTHREAD_MUTEX_INITIALIZER;
+static SArray* pCacheArrayList = NULL;
+static bool stopRefreshWorker = false;
+
+static void doInitRefreshThread(void) {
+ pCacheArrayList = taosArrayInit(4, POINTER_BYTES);
+
+ pthread_attr_t thattr;
+ pthread_attr_init(&thattr);
+ pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
+
+ pthread_create(&cacheRefreshWorker, &thattr, taosCacheTimedRefresh, NULL);
+ pthread_attr_destroy(&thattr);
+}
+
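+// Register a cache object with the shared refresh worker; the worker thread is created lazily on
+// the first registration.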
+pthread_t doRegisterCacheObj(SCacheObj* pCacheObj) {
+ pthread_once(&cacheThreadInit, doInitRefreshThread);
+
+ pthread_mutex_lock(&guard);
+ taosArrayPush(pCacheArrayList, &pCacheObj);
+ pthread_mutex_unlock(&guard);
+
+ return cacheRefreshWorker;
+}
+
/**
* @param key key of object for hash, usually a null-terminated string
* @param keyLen length of key
@@ -93,11 +132,11 @@ static FORCE_INLINE void taosCacheReleaseNode(SCacheObj *pCacheObj, SCacheDataNo
return;
}
- pCacheObj->totalSize -= pNode->size;
+ atomic_sub_fetch_64(&pCacheObj->totalSize, pNode->size);
int32_t size = (int32_t)taosHashGetSize(pCacheObj->pHashTable);
assert(size > 0);
- uDebug("cache:%s, key:%p, %p is destroyed from cache, size:%dbytes, totalNum:%d size:%" PRId64 "bytes",
+ uDebug("cache:%s, key:%p, %p is destroyed from cache, size:%dbytes, total num:%d size:%" PRId64 "bytes",
pCacheObj->name, pNode->key, pNode->data, pNode->size, size - 1, pCacheObj->totalSize);
if (pCacheObj->freeFp) {
@@ -142,19 +181,9 @@ static FORCE_INLINE void doDestroyTrashcanElem(SCacheObj* pCacheObj, STrashElem
free(pElem);
}
-/**
- * do cleanup the taos cache
- * @param pCacheObj
- */
-static void doCleanupDataCache(SCacheObj *pCacheObj);
-
-/**
- * refresh cache to remove data in both hash list and trash, if any nodes' refcount == 0, every pCacheObj->refreshTime
- * @param handle Cache object handle
- */
-static void* taosCacheTimedRefresh(void *handle);
-
SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool extendLifespan, __cache_free_fn_t fn, const char* cacheName) {
+ const int32_t SLEEP_DURATION = 500; //500 ms
+
if (refreshTimeInSeconds <= 0) {
return NULL;
}
@@ -174,9 +203,10 @@ SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool ext
}
// set free cache node callback function
- pCacheObj->freeFp = fn;
+ pCacheObj->freeFp = fn;
pCacheObj->refreshTime = refreshTimeInSeconds * 1000;
- pCacheObj->extendLifespan = extendLifespan;
+ pCacheObj->checkTick = pCacheObj->refreshTime / SLEEP_DURATION;
+  pCacheObj->extendLifespan = extendLifespan;  // if set, extend the life span whenever an item is accessed
if (__cache_lock_init(pCacheObj) != 0) {
taosHashCleanup(pCacheObj->pHashTable);
@@ -186,13 +216,7 @@ SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool ext
return NULL;
}
- pthread_attr_t thattr;
- pthread_attr_init(&thattr);
- pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
-
- pthread_create(&pCacheObj->refreshWorker, &thattr, taosCacheTimedRefresh, pCacheObj);
-
- pthread_attr_destroy(&thattr);
+ doRegisterCacheObj(pCacheObj);
return pCacheObj;
}
@@ -228,6 +252,7 @@ void *taosCachePut(SCacheObj *pCacheObj, const void *key, size_t keyLen, const v
pCacheObj->freeFp(p->data);
}
+ atomic_sub_fetch_64(&pCacheObj->totalSize, p->size);
tfree(p);
} else {
taosAddToTrashcan(pCacheObj, p);
@@ -278,7 +303,7 @@ void *taosCacheAcquireByKey(SCacheObj *pCacheObj, const void *key, size_t keyLen
}
SCacheDataNode* ptNode = NULL;
- taosHashGetClone(pCacheObj->pHashTable, key, keyLen, incRefFn, &ptNode, sizeof(void*));
+ taosHashGetClone(pCacheObj->pHashTable, key, keyLen, incRefFn, &ptNode);
void* pData = (ptNode != NULL)? ptNode->data:NULL;
@@ -364,7 +389,7 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) {
if (pCacheObj->extendLifespan && (!inTrashcan) && (!_remove)) {
atomic_store_64(&pNode->expireTime, pNode->lifespan + taosGetTimestampMs());
- uDebug("cache:%s data:%p extend expire time: %"PRId64, pCacheObj->name, pNode->data, pNode->expireTime);
+ uDebug("cache:%s, data:%p extend expire time: %"PRId64, pCacheObj->name, pNode->data, pNode->expireTime);
}
if (_remove) {
@@ -510,8 +535,10 @@ void taosCacheCleanup(SCacheObj *pCacheObj) {
}
pCacheObj->deleting = 1;
- if (taosCheckPthreadValid(pCacheObj->refreshWorker)) {
- pthread_join(pCacheObj->refreshWorker, NULL);
+
+  // wait for the refresh thread to quit before destroying the cache object.
+ while(atomic_load_8(&pCacheObj->deleting) != 0) {
+ taosMsleep(50);
}
uInfo("cache:%s will be cleaned up", pCacheObj->name);
@@ -650,50 +677,79 @@ static void doCacheRefresh(SCacheObj* pCacheObj, int64_t time, __cache_free_fn_t
}
void* taosCacheTimedRefresh(void *handle) {
- SCacheObj* pCacheObj = handle;
- if (pCacheObj == NULL) {
- uDebug("object is destroyed. no refresh retry");
- return NULL;
- }
+ assert(pCacheArrayList != NULL);
+ uDebug("cache refresh thread starts");
- setThreadName("cacheTimedRefre");
+ setThreadName("cacheRefresh");
const int32_t SLEEP_DURATION = 500; //500 ms
- int64_t totalTick = pCacheObj->refreshTime / SLEEP_DURATION;
-
int64_t count = 0;
- while(1) {
- taosMsleep(500);
- // check if current cache object will be deleted every 500ms.
- if (pCacheObj->deleting) {
- uDebug("%s refresh threads quit", pCacheObj->name);
- break;
+ while(1) {
+ taosMsleep(SLEEP_DURATION);
+ if (stopRefreshWorker) {
+ goto _end;
}
- if (++count < totalTick) {
- continue;
- }
+ pthread_mutex_lock(&guard);
+ size_t size = taosArrayGetSize(pCacheArrayList);
+ pthread_mutex_unlock(&guard);
- // reset the count value
- count = 0;
- size_t elemInHash = taosHashGetSize(pCacheObj->pHashTable);
- if (elemInHash + pCacheObj->numOfElemsInTrash == 0) {
- continue;
- }
+ count += 1;
- uDebug("%s refresh thread timed scan", pCacheObj->name);
- pCacheObj->statistics.refreshCount++;
+ for(int32_t i = 0; i < size; ++i) {
+ pthread_mutex_lock(&guard);
+ SCacheObj* pCacheObj = taosArrayGetP(pCacheArrayList, i);
- // refresh data in hash table
- if (elemInHash > 0) {
- int64_t now = taosGetTimestampMs();
- doCacheRefresh(pCacheObj, now, NULL);
- }
+ if (pCacheObj == NULL) {
+ uError("object is destroyed. ignore and try next");
+ pthread_mutex_unlock(&guard);
+ continue;
+ }
+
+      // check every 500ms whether the current cache object is being deleted.
+ if (pCacheObj->deleting) {
+ taosArrayRemove(pCacheArrayList, i);
+ size = taosArrayGetSize(pCacheArrayList);
+
+ uDebug("%s is destroying, remove it from refresh list, remain cache obj:%"PRIzu, pCacheObj->name, size);
+        pCacheObj->deleting = 0; // reset the deleting flag so that pCacheObj can continue releasing its resources.
+
+ pthread_mutex_unlock(&guard);
+ continue;
+ }
+
+ pthread_mutex_unlock(&guard);
+
+ if ((count % pCacheObj->checkTick) != 0) {
+ continue;
+ }
- taosTrashcanEmpty(pCacheObj, false);
+ size_t elemInHash = taosHashGetSize(pCacheObj->pHashTable);
+ if (elemInHash + pCacheObj->numOfElemsInTrash == 0) {
+ continue;
+ }
+
+ uDebug("%s refresh thread scan", pCacheObj->name);
+ pCacheObj->statistics.refreshCount++;
+
+ // refresh data in hash table
+ if (elemInHash > 0) {
+ int64_t now = taosGetTimestampMs();
+ doCacheRefresh(pCacheObj, now, NULL);
+ }
+
+ taosTrashcanEmpty(pCacheObj, false);
+ }
}
+ _end:
+ taosArrayDestroy(pCacheArrayList);
+
+ pCacheArrayList = NULL;
+ pthread_mutex_destroy(&guard);
+
+ uDebug("cache refresh thread quits");
return NULL;
}
@@ -705,3 +761,7 @@ void taosCacheRefresh(SCacheObj *pCacheObj, __cache_free_fn_t fp) {
int64_t now = taosGetTimestampMs();
doCacheRefresh(pCacheObj, now, fp);
}
+
+void taosStopCacheRefreshWorker() {
+  stopRefreshWorker = true;
+}
\ No newline at end of file
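The tcache changes above replace the per-object refresh thread with a single shared worker: pthread_once starts it on first registration, a mutex-guarded array lists the live cache objects, and a per-object deleting flag is cleared by the worker to release the busy-wait in taosCacheCleanup. A minimal standalone sketch of that pattern, assuming illustrative names (obj_t, registry) rather than the TDengine types:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

typedef struct { const char *name; volatile int deleting; } obj_t;

static pthread_once_t  once = PTHREAD_ONCE_INIT;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t       worker;
static obj_t          *registry[16];
static int             nobj = 0;
static volatile bool   stopWorker = false;

static void *refreshLoop(void *arg) {
  (void)arg;
  while (!stopWorker) {
    usleep(500 * 1000);                 /* scan every 500 ms */
    pthread_mutex_lock(&lock);
    for (int i = 0; i < nobj; ++i) {
      if (registry[i]->deleting) {      /* drop objects that are being destroyed */
        registry[i]->deleting = 0;      /* let the owner finish its cleanup */
        registry[i] = registry[--nobj];
        --i;
        continue;
      }
      printf("refresh %s\n", registry[i]->name);
    }
    pthread_mutex_unlock(&lock);
  }
  return NULL;
}

static void startWorker(void) { pthread_create(&worker, NULL, refreshLoop, NULL); }

void registerObj(obj_t *p) {            /* called once per cache object */
  pthread_once(&once, startWorker);
  pthread_mutex_lock(&lock);
  registry[nobj++] = p;
  pthread_mutex_unlock(&lock);
}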
diff --git a/src/util/src/tcompression.c b/src/util/src/tcompression.c
index a11f6a2f3dabd94b3c73984ed0e78770a66cb32d..48bba75926415752cfd777242a55ef71c5c96c2c 100644
--- a/src/util/src/tcompression.c
+++ b/src/util/src/tcompression.c
@@ -85,7 +85,7 @@ int tsCompressInit(){
if(lossyFloat == false && lossyDouble == false)
return 0;
- tdszInit(fPrecision, dPrecision, maxIntervals, intervals, Compressor);
+ tdszInit(fPrecision, dPrecision, maxRange, curRange, Compressor);
if(lossyFloat)
uInfo("lossy compression float is opened. ");
if(lossyDouble)
@@ -159,7 +159,7 @@ int tsCompressINTImp(const char *const input, const int nelements, char *const o
break;
}
// Get difference.
- if (!safeInt64Add(curr_value, -prev_value)) goto _copy_and_exit;
+ if (!safeInt64Add(curr_value, -prev_value_tmp)) goto _copy_and_exit;
int64_t diff = curr_value - prev_value_tmp;
// Zigzag encode the value.
@@ -480,6 +480,10 @@ int tsCompressTimestampImp(const char *const input, const int nelements, char *c
int64_t *istream = (int64_t *)input;
int64_t prev_value = istream[0];
+ if(prev_value >= 0x8000000000000000) {
+ uWarn("compression timestamp is over signed long long range. ts = 0x%"PRIx64" \n", prev_value);
+ goto _exit_over;
+ }
int64_t prev_delta = -prev_value;
uint8_t flags = 0, flag1 = 0, flag2 = 0;
uint64_t dd1 = 0, dd2 = 0;
@@ -993,4 +997,4 @@ int tsDecompressDoubleLossyImp(const char * input, int compressedSize, const int
// decompressed with sz
return tdszDecompress(SZ_DOUBLE, input + 1, compressedSize - 1, nelements, output);
}
-#endif
\ No newline at end of file
+#endif
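The tsCompressINTImp fix above guards the delta computation with safeInt64Add before subtracting the retained previous value. A self-contained sketch of that kind of overflow guard (helper names here are illustrative, not the library's own):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* true if a - b can be represented in int64_t without overflow */
static bool safe_int64_sub(int64_t a, int64_t b) {
  if (b > 0 && a < INT64_MIN + b) return false;  /* would underflow */
  if (b < 0 && a > INT64_MAX + b) return false;  /* would overflow */
  return true;
}

int main(void) {
  int64_t curr = INT64_MAX, prev = -1;
  if (safe_int64_sub(curr, prev)) {
    printf("diff = %lld\n", (long long)(curr - prev));
  } else {
    printf("delta would overflow, fall back to a raw copy\n");  /* the _copy_and_exit path */
  }
  return 0;
}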
diff --git a/src/util/src/tconfig.c b/src/util/src/tconfig.c
index ea83debfdc2cba27e1f78d1ae7e7a7c29dbd6aec..5a3dc3f9bcdee41f974e48f22b27beb2a1eb5a35 100644
--- a/src/util/src/tconfig.c
+++ b/src/util/src/tconfig.c
@@ -484,6 +484,9 @@ void taosPrintGlobalCfg() {
case TAOS_CFG_VTYPE_FLOAT:
uInfo(" %s:%s%f%s", cfg->option, blank, *((float *)cfg->ptr), tsGlobalUnit[cfg->unitType]);
break;
+ case TAOS_CFG_VTYPE_DOUBLE:
+ uInfo(" %s:%s%f%s", cfg->option, blank, *((double *)cfg->ptr), tsGlobalUnit[cfg->unitType]);
+ break;
case TAOS_CFG_VTYPE_STRING:
case TAOS_CFG_VTYPE_IPSTR:
case TAOS_CFG_VTYPE_DIRECTORY:
diff --git a/src/util/src/terror.c b/src/util/src/terror.c
index 46a33569b2136c3e2341da3a311960a4caef4bf6..42fc76e6c94227b6d8b5fb886239c42e19fc064d 100644
--- a/src/util/src/terror.c
+++ b/src/util/src/terror.c
@@ -110,6 +110,8 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TSC_DB_NOT_SELECTED, "Database not specifie
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_TABLE_NAME, "Table does not exist")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_EXCEED_SQL_LIMIT, "SQL statement too long, check maxSQLLength config")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_FILE_EMPTY, "File is empty")
+TAOS_DEFINE_ERROR(TSDB_CODE_TSC_LINE_SYNTAX_ERROR, "Syntax error in Line")
+TAOS_DEFINE_ERROR(TSDB_CODE_TSC_NO_META_CACHED, "No table meta cached")
// mnode
TAOS_DEFINE_ERROR(TSDB_CODE_MND_MSG_NOT_PROCESSED, "Message not processed")
diff --git a/src/util/src/tfunctional.c b/src/util/src/tfunctional.c
new file mode 100644
index 0000000000000000000000000000000000000000..c470a2b8aefc11141c9125e60c1c45fcbb949f09
--- /dev/null
+++ b/src/util/src/tfunctional.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "tfunctional.h"
+#include "tarray.h"
+
+
+tGenericSavedFunc* genericSavedFuncInit(GenericVaFunc func, int numOfArgs) {
+ tGenericSavedFunc* pSavedFunc = malloc(sizeof(tGenericSavedFunc) + numOfArgs * (sizeof(void*)));
+ pSavedFunc->func = func;
+ return pSavedFunc;
+}
+
+tI32SavedFunc* i32SavedFuncInit(I32VaFunc func, int numOfArgs) {
+ tI32SavedFunc* pSavedFunc = malloc(sizeof(tI32SavedFunc) + numOfArgs * sizeof(void *));
+ pSavedFunc->func = func;
+ return pSavedFunc;
+}
+
+tVoidSavedFunc* voidSavedFuncInit(VoidVaFunc func, int numOfArgs) {
+ tVoidSavedFunc* pSavedFunc = malloc(sizeof(tVoidSavedFunc) + numOfArgs * sizeof(void*));
+ pSavedFunc->func = func;
+ return pSavedFunc;
+}
+
+FORCE_INLINE void* genericInvoke(tGenericSavedFunc* const pSavedFunc) {
+ return pSavedFunc->func(pSavedFunc->args);
+}
+
+FORCE_INLINE int32_t i32Invoke(tI32SavedFunc* const pSavedFunc) {
+ return pSavedFunc->func(pSavedFunc->args);
+}
+
+FORCE_INLINE void voidInvoke(tVoidSavedFunc* const pSavedFunc) {
+ if(pSavedFunc) pSavedFunc->func(pSavedFunc->args);
+}
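For reference, a hedged sketch of how such a saved-function object (a function pointer plus captured argument slots in a flexible array member) can be built and invoked; the struct and names below are illustrative, not the tfunctional.h declarations themselves:

#include <stdio.h>
#include <stdlib.h>

typedef void *(*generic_fn_t)(void **args);

typedef struct {
  generic_fn_t func;
  void *args[];            /* flexible array: captured arguments */
} saved_func_t;

static saved_func_t *saved_func_new(generic_fn_t fn, int nargs) {
  saved_func_t *p = malloc(sizeof(*p) + nargs * sizeof(void *));
  if (p != NULL) p->func = fn;
  return p;
}

static void *invoke(saved_func_t *p) { return p->func(p->args); }

/* example callback: decide which of two row versions to keep */
static void *keep_newer(void **args) {
  return args[0] != NULL ? args[0] : args[1];
}

int main(void) {
  int new_row = 2, old_row = 1;
  saved_func_t *h = saved_func_new(keep_newer, 2);
  if (h == NULL) return 1;
  h->args[0] = &new_row;
  h->args[1] = &old_row;
  printf("kept %d\n", *(int *)invoke(h));   /* prints 2 */
  free(h);
  return 0;
}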
diff --git a/src/util/src/tlog.c b/src/util/src/tlog.c
index 88f57e8ac24cd207fc44f581564f45a9c33c348e..1ce3eadf58432337511d0d600848ad334b96fc91 100644
--- a/src/util/src/tlog.c
+++ b/src/util/src/tlog.c
@@ -178,8 +178,6 @@ static void *taosThreadToOpenNewFile(void *param) {
char keepName[LOG_FILE_NAME_LEN + 20];
sprintf(keepName, "%s.%d", tsLogObj.logName, tsLogObj.flag);
- setThreadName("openNewFile");
-
tsLogObj.flag ^= 1;
tsLogObj.lines = 0;
char name[LOG_FILE_NAME_LEN + 20];
@@ -689,12 +687,9 @@ static void taosWriteLog(SLogBuff *tLogBuff) {
static void *taosAsyncOutputLog(void *param) {
SLogBuff *tLogBuff = (SLogBuff *)param;
-
- setThreadName("asyncOutputLog");
+ setThreadName("log");
while (1) {
- //tsem_wait(&(tLogBuff->buffNotEmpty));
-
taosMsleep(writeInterval);
// Polling the buffer
diff --git a/src/util/src/tsched.c b/src/util/src/tsched.c
index 3d3dfd989926c4acf8b32d8c7df724cb2d1ac079..b86ebb38bcd6446b56357f9667636403e14d688c 100644
--- a/src/util/src/tsched.c
+++ b/src/util/src/tsched.c
@@ -122,7 +122,9 @@ void *taosProcessSchedQueue(void *scheduler) {
SSchedQueue *pSched = (SSchedQueue *)scheduler;
int ret = 0;
- setThreadName("schedQ");
+ char name[16] = {0};
+ snprintf(name, tListLen(name), "%s-taskQ", pSched->label);
+ setThreadName(name);
while (1) {
if ((ret = tsem_wait(&pSched->fullSem)) != 0) {
diff --git a/src/util/src/tskiplist.c b/src/util/src/tskiplist.c
index 082b454bb59abb6f43008a63078f665fd38ff32d..b464519ba66776ba13ce2964070d19a2a4430bfb 100644
--- a/src/util/src/tskiplist.c
+++ b/src/util/src/tskiplist.c
@@ -16,6 +16,7 @@
#include "tskiplist.h"
#include "os.h"
#include "tcompare.h"
+#include "tdataformat.h"
#include "tulog.h"
#include "tutil.h"
@@ -31,6 +32,7 @@ static SSkipListNode *tSkipListNewNode(uint8_t level);
static SSkipListNode *tSkipListPutImpl(SSkipList *pSkipList, void *pData, SSkipListNode **direction, bool isForward,
bool hasDup);
+
static FORCE_INLINE int tSkipListWLock(SSkipList *pSkipList);
static FORCE_INLINE int tSkipListRLock(SSkipList *pSkipList);
static FORCE_INLINE int tSkipListUnlock(SSkipList *pSkipList);
@@ -80,6 +82,7 @@ SSkipList *tSkipListCreate(uint8_t maxLevel, uint8_t keyType, uint16_t keyLen, _
#if SKIP_LIST_RECORD_PERFORMANCE
pSkipList->state.nTotalMemSize += sizeof(SSkipList);
#endif
+ pSkipList->insertHandleFn = NULL;
return pSkipList;
}
@@ -97,6 +100,8 @@ void tSkipListDestroy(SSkipList *pSkipList) {
tSkipListFreeNode(pTemp);
}
+ tfree(pSkipList->insertHandleFn);
+
tSkipListUnlock(pSkipList);
if (pSkipList->lock != NULL) {
pthread_rwlock_destroy(pSkipList->lock);
@@ -124,8 +129,7 @@ SSkipListNode *tSkipListPut(SSkipList *pSkipList, void *pData) {
return pNode;
}
-// Put a batch of data into skiplist. The batch of data must be in ascending order
-void tSkipListPutBatch(SSkipList *pSkipList, void **ppData, int ndata) {
+void tSkipListPutBatchByIter(SSkipList *pSkipList, void *iter, iter_next_fn_t iterate) {
SSkipListNode *backward[MAX_SKIP_LIST_LEVEL] = {0};
SSkipListNode *forward[MAX_SKIP_LIST_LEVEL] = {0};
bool hasDup = false;
@@ -135,17 +139,21 @@ void tSkipListPutBatch(SSkipList *pSkipList, void **ppData, int ndata) {
tSkipListWLock(pSkipList);
+ void* pData = iterate(iter);
+  if(pData == NULL) { tSkipListUnlock(pSkipList); return; }  // do not leak the write lock acquired above
+
// backward to put the first data
- hasDup = tSkipListGetPosToPut(pSkipList, backward, ppData[0]);
- tSkipListPutImpl(pSkipList, ppData[0], backward, false, hasDup);
+ hasDup = tSkipListGetPosToPut(pSkipList, backward, pData);
+
+ tSkipListPutImpl(pSkipList, pData, backward, false, hasDup);
for (int level = 0; level < pSkipList->maxLevel; level++) {
forward[level] = SL_NODE_GET_BACKWARD_POINTER(backward[level], level);
}
// forward to put the rest of data
- for (int idata = 1; idata < ndata; idata++) {
- pDataKey = pSkipList->keyFn(ppData[idata]);
+ while ((pData = iterate(iter)) != NULL) {
+ pDataKey = pSkipList->keyFn(pData);
hasDup = false;
// Compare max key
@@ -186,9 +194,8 @@ void tSkipListPutBatch(SSkipList *pSkipList, void **ppData, int ndata) {
}
}
- tSkipListPutImpl(pSkipList, ppData[idata], forward, true, hasDup);
+ tSkipListPutImpl(pSkipList, pData, forward, true, hasDup);
}
-
tSkipListUnlock(pSkipList);
}
@@ -661,18 +668,40 @@ static SSkipListNode *tSkipListPutImpl(SSkipList *pSkipList, void *pData, SSkipL
uint8_t dupMode = SL_DUP_MODE(pSkipList);
SSkipListNode *pNode = NULL;
- if (hasDup && (dupMode == SL_DISCARD_DUP_KEY || dupMode == SL_UPDATE_DUP_KEY)) {
+ if (hasDup && (dupMode != SL_ALLOW_DUP_KEY)) {
if (dupMode == SL_UPDATE_DUP_KEY) {
if (isForward) {
pNode = SL_NODE_GET_FORWARD_POINTER(direction[0], 0);
} else {
pNode = SL_NODE_GET_BACKWARD_POINTER(direction[0], 0);
}
- atomic_store_ptr(&(pNode->pData), pData);
+ if (pSkipList->insertHandleFn) {
+ pSkipList->insertHandleFn->args[0] = pData;
+ pSkipList->insertHandleFn->args[1] = pNode->pData;
+ pData = genericInvoke(pSkipList->insertHandleFn);
+ }
+ if(pData) {
+ atomic_store_ptr(&(pNode->pData), pData);
+ }
+ } else {
+      // for compatibility, a duplicate key inserted when update=0 must also be counted as an affected row
+ if(pSkipList->insertHandleFn) {
+ pSkipList->insertHandleFn->args[0] = NULL;
+ pSkipList->insertHandleFn->args[1] = NULL;
+ genericInvoke(pSkipList->insertHandleFn);
+ }
}
} else {
pNode = tSkipListNewNode(getSkipListRandLevel(pSkipList));
if (pNode != NULL) {
+      // insertHandleFn is assigned only for time-series data,
+      // in which case pData points to memory that must be freed later;
+      // for metadata, that allocation callback is never invoked.
+ if (pSkipList->insertHandleFn) {
+ pSkipList->insertHandleFn->args[0] = pData;
+ pSkipList->insertHandleFn->args[1] = NULL;
+ pData = genericInvoke(pSkipList->insertHandleFn);
+ }
pNode->pData = pData;
tSkipListDoInsert(pSkipList, direction, pNode, isForward);
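tSkipListPutBatchByIter above replaces the (void **ppData, int ndata) batch interface with a pull-style iterator: the producer exposes a next callback that returns NULL at the end, so the consumer no longer needs a pre-materialised array. A compact sketch of that shape, with a toy array iterator standing in for the real data source:

#include <stdio.h>
#include <stdlib.h>

typedef void *(*iter_next_fn_t)(void *iter);

/* toy iterator over an int array */
typedef struct { int *data; int n; int pos; } array_iter_t;

static void *array_next(void *arg) {
  array_iter_t *it = arg;
  return (it->pos < it->n) ? &it->data[it->pos++] : NULL;
}

/* consumer: pulls items one by one instead of taking (void **, ndata) */
static void put_batch_by_iter(void *iter, iter_next_fn_t next) {
  void *p;
  while ((p = next(iter)) != NULL) {
    printf("insert %d\n", *(int *)p);    /* real code would insert into the skiplist */
  }
}

int main(void) {
  int rows[] = {1, 3, 5, 7};
  array_iter_t it = {rows, 4, 0};
  put_batch_by_iter(&it, array_next);
  return 0;
}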
diff --git a/src/util/src/tstrbuild.c b/src/util/src/tstrbuild.c
index 61a6d67952a73b8efd0c20a45214a1546f1f0258..eec21d18354a4141be92530cda1a953e5efd89a8 100644
--- a/src/util/src/tstrbuild.c
+++ b/src/util/src/tstrbuild.c
@@ -69,12 +69,12 @@ void taosStringBuilderAppendNull(SStringBuilder* sb) { taosStringBuilderAppendSt
void taosStringBuilderAppendInteger(SStringBuilder* sb, int64_t v) {
char buf[64];
- size_t len = sprintf(buf, "%" PRId64, v);
- taosStringBuilderAppendStringLen(sb, buf, len);
+ size_t len = snprintf(buf, sizeof(buf), "%" PRId64, v);
+ taosStringBuilderAppendStringLen(sb, buf, MIN(len, sizeof(buf)));
}
void taosStringBuilderAppendDouble(SStringBuilder* sb, double v) {
- char buf[64];
- size_t len = sprintf(buf, "%.9lf", v);
- taosStringBuilderAppendStringLen(sb, buf, len);
+ char buf[512];
+ size_t len = snprintf(buf, sizeof(buf), "%.9lf", v);
+ taosStringBuilderAppendStringLen(sb, buf, MIN(len, sizeof(buf)));
}
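The tstrbuild changes swap sprintf for snprintf and clamp the appended length. A small sketch of the underlying rule, since snprintf returns the length that would have been written rather than the number of bytes actually stored (helper name is illustrative):

#include <stdio.h>

/* format v into buf and return the number of bytes actually stored */
static size_t format_double(char *buf, size_t cap, double v) {
  int n = snprintf(buf, cap, "%.9lf", v);
  if (n < 0) return 0;                       /* encoding error */
  /* snprintf reports the untruncated length; clamp to what fits (minus the NUL) */
  return ((size_t)n < cap) ? (size_t)n : cap - 1;
}

int main(void) {
  char buf[16];
  size_t len = format_double(buf, sizeof(buf), 123456789.123456789);
  printf("stored %zu bytes: %s\n", len, buf);
  return 0;
}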
diff --git a/src/util/tests/trefTest.c b/src/util/tests/trefTest.c
index fe3dcab201de3f5b5068c997f3759a5b8397b5b7..e01da070afd3333cf02c25b51d2e9711c1616fb0 100644
--- a/src/util/tests/trefTest.c
+++ b/src/util/tests/trefTest.c
@@ -35,8 +35,6 @@ void *addRef(void *param) {
SRefSpace *pSpace = (SRefSpace *)param;
int id;
- setThreadName("addRef");
-
for (int i=0; i < pSpace->steps; ++i) {
printf("a");
id = random() % pSpace->refNum;
@@ -54,8 +52,6 @@ void *removeRef(void *param) {
SRefSpace *pSpace = (SRefSpace *)param;
int id, code;
- setThreadName("removeRef");
-
for (int i=0; i < pSpace->steps; ++i) {
printf("d");
id = random() % pSpace->refNum;
@@ -74,8 +70,6 @@ void *acquireRelease(void *param) {
SRefSpace *pSpace = (SRefSpace *)param;
int id;
- setThreadName("acquireRelease");
-
for (int i=0; i < pSpace->steps; ++i) {
printf("a");
@@ -97,8 +91,6 @@ void myfree(void *p) {
void *openRefSpace(void *param) {
SRefSpace *pSpace = (SRefSpace *)param;
- setThreadName("openRefSpace");
-
printf("c");
pSpace->rsetId = taosOpenRef(50, myfree);
diff --git a/src/vnode/src/vnodeMgmt.c b/src/vnode/src/vnodeMgmt.c
index 8b17d3a5f2b8871aa83d4daf81ff936773de736a..67b9ce5ad91c2e378c0d67080e9be5c02df81129 100644
--- a/src/vnode/src/vnodeMgmt.c
+++ b/src/vnode/src/vnodeMgmt.c
@@ -93,7 +93,7 @@ static void vnodeIncRef(void *ptNode) {
void *vnodeAcquire(int32_t vgId) {
SVnodeObj *pVnode = NULL;
if (tsVnodesHash != NULL) {
- taosHashGetClone(tsVnodesHash, &vgId, sizeof(int32_t), vnodeIncRef, &pVnode, sizeof(void *));
+ taosHashGetClone(tsVnodesHash, &vgId, sizeof(int32_t), vnodeIncRef, &pVnode);
}
if (pVnode == NULL) {
@@ -110,7 +110,9 @@ void vnodeRelease(void *vparam) {
if (vparam == NULL) return;
int32_t refCount = atomic_sub_fetch_32(&pVnode->refCount, 1);
- vTrace("vgId:%d, release vnode, refCount:%d pVnode:%p", pVnode->vgId, refCount, pVnode);
+ int32_t vgId = pVnode->vgId;
+
+ vTrace("vgId:%d, release vnode, refCount:%d pVnode:%p", vgId, refCount, pVnode);
assert(refCount >= 0);
if (refCount > 0) {
@@ -118,10 +120,10 @@ void vnodeRelease(void *vparam) {
tsem_post(&pVnode->sem);
}
} else {
- vDebug("vgId:%d, vnode will be destroyed, refCount:%d pVnode:%p", pVnode->vgId, refCount, pVnode);
+ vDebug("vgId:%d, vnode will be destroyed, refCount:%d pVnode:%p", vgId, refCount, pVnode);
vnodeDestroyInMWorker(pVnode);
int32_t count = taosHashGetSize(tsVnodesHash);
- vDebug("vgId:%d, vnode is destroyed, vnodes:%d", pVnode->vgId, count);
+ vDebug("vgId:%d, vnode is destroyed, vnodes:%d", vgId, count);
}
}
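The vnodeRelease change above copies vgId into a local before the final reference drop, so the trace printed after destruction never reads freed memory. The same pattern in a generic form (hypothetical handle_t type):

#include <stdio.h>
#include <stdlib.h>

typedef struct { int id; int refCount; } handle_t;

static void handle_release(handle_t *h) {
  int id  = h->id;                 /* copy fields needed for logging first */
  int ref = --h->refCount;
  if (ref > 0) {
    printf("id:%d released, ref:%d\n", id, ref);
    return;
  }
  free(h);                         /* object is gone after this point */
  printf("id:%d destroyed\n", id); /* safe: uses only the copied value */
}

int main(void) {
  handle_t *h = malloc(sizeof(*h));
  h->id = 7;
  h->refCount = 2;
  handle_release(h);   /* id:7 released, ref:1 */
  handle_release(h);   /* id:7 destroyed */
  return 0;
}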
diff --git a/src/vnode/src/vnodeWorker.c b/src/vnode/src/vnodeWorker.c
index e94c99cbea99139a21fb7fb64c729a12d3091349..7fcc393746639777af20730f9daf8d7533c2b5e6 100644
--- a/src/vnode/src/vnodeWorker.c
+++ b/src/vnode/src/vnodeWorker.c
@@ -25,7 +25,7 @@
typedef enum {
VNODE_WORKER_ACTION_CLEANUP,
- VNODE_WORKER_ACTION_DESTROUY
+ VNODE_WORKER_ACTION_DESTROY
} EVMWorkerAction;
typedef struct {
@@ -155,7 +155,7 @@ int32_t vnodeCleanupInMWorker(SVnodeObj *pVnode) {
int32_t vnodeDestroyInMWorker(SVnodeObj *pVnode) {
vTrace("vgId:%d, will destroy in vmworker", pVnode->vgId);
- return vnodeWriteIntoMWorker(pVnode, VNODE_WORKER_ACTION_DESTROUY, NULL);
+ return vnodeWriteIntoMWorker(pVnode, VNODE_WORKER_ACTION_DESTROY, NULL);
}
static void vnodeFreeMWorkerMsg(SVMWorkerMsg *pMsg) {
@@ -179,7 +179,7 @@ static void vnodeProcessMWorkerMsg(SVMWorkerMsg *pMsg) {
case VNODE_WORKER_ACTION_CLEANUP:
vnodeCleanUp(pMsg->pVnode);
break;
- case VNODE_WORKER_ACTION_DESTROUY:
+ case VNODE_WORKER_ACTION_DESTROY:
vnodeDestroy(pMsg->pVnode);
break;
default:
diff --git a/src/wal/src/walMgmt.c b/src/wal/src/walMgmt.c
index 45f65b2c2fc9ae5412f471805a3244644e590638..05324d31eec56ee74b81c70dc451eadf83d518d2 100644
--- a/src/wal/src/walMgmt.c
+++ b/src/wal/src/walMgmt.c
@@ -192,7 +192,7 @@ static void walFsyncAll() {
static void *walThreadFunc(void *param) {
int stop = 0;
- setThreadName("walThrd");
+ setThreadName("wal");
while (1) {
walUpdateSeq();
walFsyncAll();
diff --git a/tests/Jenkinsfile b/tests/Jenkinsfile
index c75427b5f4e568553dbcd9e2686f529a2745c029..eeb56bd3b98deaebdcd4ef95e9aae514fb68de9b 100644
--- a/tests/Jenkinsfile
+++ b/tests/Jenkinsfile
@@ -1,5 +1,32 @@
def pre_test(){
+ sh '''
+ sudo rmtaos||echo 'no taosd installed'
+ '''
+ sh '''
+ cd ${WKC}
+ git reset --hard
+ git checkout $BRANCH_NAME
+ git pull
+ git submodule update
+ cd ${WK}
+ git reset --hard
+ git checkout $BRANCH_NAME
+ git pull
+ export TZ=Asia/Harbin
+ date
+ rm -rf ${WK}/debug
+ mkdir debug
+ cd debug
+ cmake .. > /dev/null
+ make > /dev/null
+ make install > /dev/null
+ pip3 install ${WKC}/src/connector/python
+ '''
+ return 1
+}
+def pre_test_p(){
+
sh '''
sudo rmtaos||echo 'no taosd installed'
'''
@@ -39,7 +66,7 @@ pipeline {
stage('pytest') {
agent{label 'slad1'}
steps {
- pre_test()
+ pre_test_p()
sh '''
cd ${WKC}/tests
find pytest -name '*'sql|xargs rm -rf
@@ -48,7 +75,7 @@ pipeline {
}
}
stage('test_b1') {
- agent{label 'master'}
+ agent{label 'slad2'}
steps {
pre_test()
@@ -62,7 +89,7 @@ pipeline {
}
stage('test_crash_gen') {
- agent{label "slad2"}
+ agent{label "slad3"}
steps {
pre_test()
sh '''
@@ -92,7 +119,7 @@ pipeline {
}
sh'''
- systemctl start taosd
+ nohup taosd >/dev/null &
sleep 10
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
@@ -128,7 +155,7 @@ pipeline {
'''
}
sh '''
- systemctl stop taosd
+ pkill -9 taosd || echo 1
cd ${WKC}/tests
./test-all.sh b2
date
@@ -141,7 +168,7 @@ pipeline {
}
stage('test_valgrind') {
- agent{label "slad3"}
+ agent{label "slad4"}
steps {
pre_test()
diff --git a/tests/examples/C#/C#checker/C#checker.cs b/tests/examples/C#/C#checker/C#checker.cs
index 80fa3b838661ecbc80d727612166c5396df279b3..29ad290343bb4fbacade48a0b59e0350bd35f213 100644
--- a/tests/examples/C#/C#checker/C#checker.cs
+++ b/tests/examples/C#/C#checker/C#checker.cs
@@ -33,7 +33,7 @@ namespace TDengineDriver
//sql parameters
private string dbName;
private string tbName;
-
+ private string precision;
private bool isInsertData;
private bool isQueryData;
@@ -61,9 +61,9 @@ namespace TDengineDriver
tester.checkInsert();
tester.checkSelect();
tester.checkDropTable();
-
+ tester.dropDatabase();
tester.CloseConnection();
-
+ tester.cleanup();
}
@@ -156,7 +156,9 @@ namespace TDengineDriver
Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "How many rows to insert, default is 100");
Console.WriteLine("{0:G}{1:G}", indent, "-c");
Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Configuration directory");
-
+ //
+ Console.WriteLine("{0:G}{1:G}", indent, "-ps");
+ Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Configurate db precision,default millisecond");
ExitProgram();
}
}
@@ -168,9 +170,9 @@ namespace TDengineDriver
host = this.GetArgumentAsString(argv, "-h", "127.0.0.1");
user = this.GetArgumentAsString(argv, "-u", "root");
password = this.GetArgumentAsString(argv, "-p", "taosdata");
- dbName = this.GetArgumentAsString(argv, "-db", "test");
+ dbName = this.GetArgumentAsString(argv, "-d", "test");
tbName = this.GetArgumentAsString(argv, "-s", "weather");
-
+ precision = this.GetArgumentAsString(argv, "-ps", "ms");
isInsertData = this.GetArgumentAsLong(argv, "-w", 0, 1, 1) != 0;
isQueryData = this.GetArgumentAsLong(argv, "-r", 0, 1, 1) != 0;
tableCount = this.GetArgumentAsLong(argv, "-n", 1, 10000, 10);
@@ -183,6 +185,7 @@ namespace TDengineDriver
{
TDengine.Options((int)TDengineInitOption.TDDB_OPTION_CONFIGDIR, this.configDir);
TDengine.Options((int)TDengineInitOption.TDDB_OPTION_SHELL_ACTIVITY_TIMER, "60");
+ Console.WriteLine("init...");
TDengine.Init();
Console.WriteLine("get connection starting...");
}
@@ -204,7 +207,7 @@ namespace TDengineDriver
public void createDatabase()
{
StringBuilder sql = new StringBuilder();
- sql.Append("create database if not exists ").Append(this.dbName);
+ sql.Append("create database if not exists ").Append(this.dbName).Append(" precision '").Append(this.precision).Append("'");
execute(sql.ToString());
}
public void useDatabase()
@@ -216,8 +219,8 @@ namespace TDengineDriver
public void checkSelect()
{
StringBuilder sql = new StringBuilder();
- sql.Append("select * from test.weather");
- execute(sql.ToString());
+ sql.Append("select * from ").Append(this.dbName).Append(".").Append(this.tbName);
+ ExecuteQuery(sql.ToString());
}
public void createTable()
{
@@ -228,7 +231,7 @@ namespace TDengineDriver
public void checkInsert()
{
StringBuilder sql = new StringBuilder();
- sql.Append("insert into test.weather (ts, temperature, humidity) values(now, 20.5, 34)");
+ sql.Append("insert into ").Append(this.dbName).Append(".").Append(this.tbName).Append("(ts, temperature, humidity) values(now, 20.5, 34)");
execute(sql.ToString());
}
public void checkDropTable()
@@ -237,6 +240,12 @@ namespace TDengineDriver
sql.Append("drop table if exists ").Append(this.dbName).Append(".").Append(this.tbName).Append("");
execute(sql.ToString());
}
+ public void dropDatabase()
+ {
+ StringBuilder sql = new StringBuilder();
+ sql.Append("drop database if exists ").Append(this.dbName);
+ execute(sql.ToString());
+ }
public void execute(string sql)
{
DateTime dt1 = DateTime.Now;
@@ -266,6 +275,7 @@ namespace TDengineDriver
DateTime dt1 = DateTime.Now;
long queryRows = 0;
IntPtr res = TDengine.Query(conn, sql);
+ getPrecision(res);
if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0))
{
Console.Write(sql.ToString() + " failure, ");
@@ -379,8 +389,31 @@ namespace TDengineDriver
static void ExitProgram()
{
- TDengine.Cleanup();
System.Environment.Exit(0);
}
+
+ public void cleanup()
+ {
+ Console.WriteLine("clean up...");
+ System.Environment.Exit(0);
+ }
+ // method to get db precision
+ public void getPrecision(IntPtr res)
+ {
+ int psc=TDengine.ResultPrecision(res);
+ switch(psc)
+ {
+ case 0:
+ Console.WriteLine("db:[{0:G}]'s precision is {1:G}",this.dbName,"millisecond");
+ break;
+ case 1:
+ Console.WriteLine("db:[{0:G}]'s precision is {1:G}",this.dbName,"microsecond");
+ break;
+ case 2:
+ Console.WriteLine("db:[{0:G}]'s precision is {1:G}",this.dbName,"nanosecond");
+ break;
+ }
+
+ }
}
}
diff --git a/tests/examples/C#/C#checker/TDengineDriver.cs b/tests/examples/C#/C#checker/TDengineDriver.cs
index b6f143e1813d60c1ac4ae8356efdca4929c51345..2864b7bcdddc6cb5ded1bb1bd54ff818b2181d18 100644
--- a/tests/examples/C#/C#checker/TDengineDriver.cs
+++ b/tests/examples/C#/C#checker/TDengineDriver.cs
@@ -19,136 +19,153 @@ using System.Runtime.InteropServices;
namespace TDengineDriver
{
- enum TDengineDataType {
- TSDB_DATA_TYPE_NULL = 0, // 1 bytes
- TSDB_DATA_TYPE_BOOL = 1, // 1 bytes
- TSDB_DATA_TYPE_TINYINT = 2, // 1 bytes
- TSDB_DATA_TYPE_SMALLINT = 3, // 2 bytes
- TSDB_DATA_TYPE_INT = 4, // 4 bytes
- TSDB_DATA_TYPE_BIGINT = 5, // 8 bytes
- TSDB_DATA_TYPE_FLOAT = 6, // 4 bytes
- TSDB_DATA_TYPE_DOUBLE = 7, // 8 bytes
- TSDB_DATA_TYPE_BINARY = 8, // string
- TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes
- TSDB_DATA_TYPE_NCHAR = 10 // unicode string
- }
-
- enum TDengineInitOption
- {
- TSDB_OPTION_LOCALE = 0,
- TSDB_OPTION_CHARSET = 1,
- TSDB_OPTION_TIMEZONE = 2,
- TDDB_OPTION_CONFIGDIR = 3,
- TDDB_OPTION_SHELL_ACTIVITY_TIMER = 4
- }
-
- class TDengineMeta
- {
- public string name;
- public short size;
- public byte type;
- public string TypeName()
+ enum TDengineDataType
{
- switch ((TDengineDataType)type)
- {
- case TDengineDataType.TSDB_DATA_TYPE_BOOL:
- return "BOOLEAN";
- case TDengineDataType.TSDB_DATA_TYPE_TINYINT:
- return "BYTE";
- case TDengineDataType.TSDB_DATA_TYPE_SMALLINT:
- return "SHORT";
- case TDengineDataType.TSDB_DATA_TYPE_INT:
- return "INT";
- case TDengineDataType.TSDB_DATA_TYPE_BIGINT:
- return "LONG";
- case TDengineDataType.TSDB_DATA_TYPE_FLOAT:
- return "FLOAT";
- case TDengineDataType.TSDB_DATA_TYPE_DOUBLE:
- return "DOUBLE";
- case TDengineDataType.TSDB_DATA_TYPE_BINARY:
- return "STRING";
- case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP:
- return "TIMESTAMP";
- case TDengineDataType.TSDB_DATA_TYPE_NCHAR:
- return "NCHAR";
- default:
- return "undefine";
- }
+ TSDB_DATA_TYPE_NULL = 0, // 1 bytes
+ TSDB_DATA_TYPE_BOOL = 1, // 1 bytes
+ TSDB_DATA_TYPE_TINYINT = 2, // 1 bytes
+ TSDB_DATA_TYPE_SMALLINT = 3, // 2 bytes
+ TSDB_DATA_TYPE_INT = 4, // 4 bytes
+ TSDB_DATA_TYPE_BIGINT = 5, // 8 bytes
+ TSDB_DATA_TYPE_FLOAT = 6, // 4 bytes
+ TSDB_DATA_TYPE_DOUBLE = 7, // 8 bytes
+ TSDB_DATA_TYPE_BINARY = 8, // string
+ TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes
+ TSDB_DATA_TYPE_NCHAR = 10, // unicode string
+ TSDB_DATA_TYPE_UTINYINT = 11,// 1 byte
+ TSDB_DATA_TYPE_USMALLINT= 12,// 2 bytes
+ TSDB_DATA_TYPE_UINT = 13, // 4 bytes
+ TSDB_DATA_TYPE_UBIGINT= 14 // 8 bytes
}
- }
- class TDengine
- {
- public const int TSDB_CODE_SUCCESS = 0;
+ enum TDengineInitOption
+ {
+ TSDB_OPTION_LOCALE = 0,
+ TSDB_OPTION_CHARSET = 1,
+ TSDB_OPTION_TIMEZONE = 2,
+ TDDB_OPTION_CONFIGDIR = 3,
+ TDDB_OPTION_SHELL_ACTIVITY_TIMER = 4
+ }
- [DllImport("taos.dll", EntryPoint = "taos_init", CallingConvention = CallingConvention.Cdecl)]
- static extern public void Init();
+ class TDengineMeta
+ {
+ public string name;
+ public short size;
+ public byte type;
+ public string TypeName()
+ {
+ switch ((TDengineDataType)type)
+ {
+ case TDengineDataType.TSDB_DATA_TYPE_BOOL:
+ return "BOOL";
+ case TDengineDataType.TSDB_DATA_TYPE_TINYINT:
+ return "TINYINT";
+ case TDengineDataType.TSDB_DATA_TYPE_SMALLINT:
+ return "SMALLINT";
+ case TDengineDataType.TSDB_DATA_TYPE_INT:
+ return "INT";
+ case TDengineDataType.TSDB_DATA_TYPE_BIGINT:
+ return "BIGINT";
+ case TDengineDataType.TSDB_DATA_TYPE_UTINYINT:
+ return "TINYINT UNSIGNED";
+ case TDengineDataType.TSDB_DATA_TYPE_USMALLINT:
+ return "SMALLINT UNSIGNED";
+ case TDengineDataType.TSDB_DATA_TYPE_UINT:
+ return "INT UNSIGNED";
+ case TDengineDataType.TSDB_DATA_TYPE_UBIGINT:
+ return "BIGINT UNSIGNED";
+ case TDengineDataType.TSDB_DATA_TYPE_FLOAT:
+ return "FLOAT";
+ case TDengineDataType.TSDB_DATA_TYPE_DOUBLE:
+ return "DOUBLE";
+ case TDengineDataType.TSDB_DATA_TYPE_BINARY:
+ return "STRING";
+ case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP:
+ return "TIMESTAMP";
+ case TDengineDataType.TSDB_DATA_TYPE_NCHAR:
+ return "NCHAR";
+ default:
+ return "undefine";
+ }
+ }
+ }
- [DllImport("taos.dll", EntryPoint = "taos_cleanup", CallingConvention = CallingConvention.Cdecl)]
- static extern public void Cleanup();
+ class TDengine
+ {
+ public const int TSDB_CODE_SUCCESS = 0;
- [DllImport("taos.dll", EntryPoint = "taos_options", CallingConvention = CallingConvention.Cdecl)]
- static extern public void Options(int option, string value);
+ [DllImport("taos", EntryPoint = "taos_init", CallingConvention = CallingConvention.Cdecl)]
+ static extern public void Init();
- [DllImport("taos.dll", EntryPoint = "taos_connect", CallingConvention = CallingConvention.Cdecl)]
- static extern public IntPtr Connect(string ip, string user, string password, string db, short port);
+ [DllImport("taos", EntryPoint = "taos_cleanup", CallingConvention = CallingConvention.Cdecl)]
+ static extern public void Cleanup();
- [DllImport("taos.dll", EntryPoint = "taos_errstr", CallingConvention = CallingConvention.Cdecl)]
- static extern private IntPtr taos_errstr(IntPtr res);
- static public string Error(IntPtr res)
- {
- IntPtr errPtr = taos_errstr(res);
- return Marshal.PtrToStringAnsi(errPtr);
- }
+ [DllImport("taos", EntryPoint = "taos_options", CallingConvention = CallingConvention.Cdecl)]
+ static extern public void Options(int option, string value);
- [DllImport("taos.dll", EntryPoint = "taos_errno", CallingConvention = CallingConvention.Cdecl)]
- static extern public int ErrorNo(IntPtr res);
+ [DllImport("taos", EntryPoint = "taos_connect", CallingConvention = CallingConvention.Cdecl)]
+ static extern public IntPtr Connect(string ip, string user, string password, string db, short port);
- [DllImport("taos.dll", EntryPoint = "taos_query", CallingConvention = CallingConvention.Cdecl)]
- static extern public IntPtr Query(IntPtr conn, string sqlstr);
+ [DllImport("taos", EntryPoint = "taos_errstr", CallingConvention = CallingConvention.Cdecl)]
+ static extern private IntPtr taos_errstr(IntPtr res);
+ static public string Error(IntPtr res)
+ {
+ IntPtr errPtr = taos_errstr(res);
+ return Marshal.PtrToStringAnsi(errPtr);
+ }
- [DllImport("taos.dll", EntryPoint = "taos_affected_rows", CallingConvention = CallingConvention.Cdecl)]
- static extern public int AffectRows(IntPtr res);
+ [DllImport("taos", EntryPoint = "taos_errno", CallingConvention = CallingConvention.Cdecl)]
+ static extern public int ErrorNo(IntPtr res);
- [DllImport("taos.dll", EntryPoint = "taos_field_count", CallingConvention = CallingConvention.Cdecl)]
- static extern public int FieldCount(IntPtr res);
+ [DllImport("taos", EntryPoint = "taos_query", CallingConvention = CallingConvention.Cdecl)]
+ static extern public IntPtr Query(IntPtr conn, string sqlstr);
- [DllImport("taos.dll", EntryPoint = "taos_fetch_fields", CallingConvention = CallingConvention.Cdecl)]
- static extern private IntPtr taos_fetch_fields(IntPtr res);
- static public List FetchFields(IntPtr res)
- {
- const int fieldSize = 68;
-
- List metas = new List();
- if (res == IntPtr.Zero)
- {
- return metas;
- }
-
- int fieldCount = FieldCount(res);
- IntPtr fieldsPtr = taos_fetch_fields(res);
-
- for (int i = 0; i < fieldCount; ++i)
- {
- int offset = i * fieldSize;
-
- TDengineMeta meta = new TDengineMeta();
- meta.name = Marshal.PtrToStringAnsi(fieldsPtr + offset);
- meta.type = Marshal.ReadByte(fieldsPtr + offset + 65);
- meta.size = Marshal.ReadInt16(fieldsPtr + offset + 66);
- metas.Add(meta);
- }
-
- return metas;
- }
+ [DllImport("taos", EntryPoint = "taos_affected_rows", CallingConvention = CallingConvention.Cdecl)]
+ static extern public int AffectRows(IntPtr res);
- [DllImport("taos.dll", EntryPoint = "taos_fetch_row", CallingConvention = CallingConvention.Cdecl)]
- static extern public IntPtr FetchRows(IntPtr res);
+ [DllImport("taos", EntryPoint = "taos_field_count", CallingConvention = CallingConvention.Cdecl)]
+ static extern public int FieldCount(IntPtr res);
- [DllImport("taos.dll", EntryPoint = "taos_free_result", CallingConvention = CallingConvention.Cdecl)]
- static extern public IntPtr FreeResult(IntPtr res);
+ [DllImport("taos", EntryPoint = "taos_fetch_fields", CallingConvention = CallingConvention.Cdecl)]
+ static extern private IntPtr taos_fetch_fields(IntPtr res);
+ static public List FetchFields(IntPtr res)
+ {
+ const int fieldSize = 68;
- [DllImport("taos.dll", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)]
- static extern public int Close(IntPtr taos);
- }
-}
\ No newline at end of file
+ List metas = new List();
+ if (res == IntPtr.Zero)
+ {
+ return metas;
+ }
+
+ int fieldCount = FieldCount(res);
+ IntPtr fieldsPtr = taos_fetch_fields(res);
+
+ for (int i = 0; i < fieldCount; ++i)
+ {
+ int offset = i * fieldSize;
+
+ TDengineMeta meta = new TDengineMeta();
+ meta.name = Marshal.PtrToStringAnsi(fieldsPtr + offset);
+ meta.type = Marshal.ReadByte(fieldsPtr + offset + 65);
+ meta.size = Marshal.ReadInt16(fieldsPtr + offset + 66);
+ metas.Add(meta);
+ }
+
+ return metas;
+ }
+
+ [DllImport("taos", EntryPoint = "taos_fetch_row", CallingConvention = CallingConvention.Cdecl)]
+ static extern public IntPtr FetchRows(IntPtr res);
+
+ [DllImport("taos", EntryPoint = "taos_free_result", CallingConvention = CallingConvention.Cdecl)]
+ static extern public IntPtr FreeResult(IntPtr res);
+
+ [DllImport("taos", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)]
+ static extern public int Close(IntPtr taos);
+
+        // get the precision of the result set
+ [DllImport("taos", EntryPoint = "taos_result_precision", CallingConvention = CallingConvention.Cdecl)]
+ static extern public int ResultPrecision(IntPtr taos);
+ }
+}
diff --git a/tests/examples/C#/TDengineDriver.cs b/tests/examples/C#/TDengineDriver.cs
index 2c150341f62d16372a99d341a495771e4c2a3dbc..2864b7bcdddc6cb5ded1bb1bd54ff818b2181d18 100644
--- a/tests/examples/C#/TDengineDriver.cs
+++ b/tests/examples/C#/TDengineDriver.cs
@@ -163,5 +163,9 @@ namespace TDengineDriver
[DllImport("taos", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)]
static extern public int Close(IntPtr taos);
+
+  // get the precision of the result set
+ [DllImport("taos", EntryPoint = "taos_result_precision", CallingConvention = CallingConvention.Cdecl)]
+ static extern public int ResultPrecision(IntPtr taos);
}
}
diff --git a/tests/examples/C#/taosdemo/TDengineDriver.cs b/tests/examples/C#/taosdemo/TDengineDriver.cs
index 2c150341f62d16372a99d341a495771e4c2a3dbc..e6c3a598adc0bc4bcf5ea84953f649b418199555 100644
--- a/tests/examples/C#/taosdemo/TDengineDriver.cs
+++ b/tests/examples/C#/taosdemo/TDengineDriver.cs
@@ -163,5 +163,8 @@ namespace TDengineDriver
[DllImport("taos", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)]
static extern public int Close(IntPtr taos);
+  // get the precision of the result set
+ [DllImport("taos", EntryPoint = "taos_result_precision", CallingConvention = CallingConvention.Cdecl)]
+ static extern public int ResultPrecision(IntPtr taos);
}
}
diff --git a/tests/examples/JDBC/JDBCDemo/pom.xml b/tests/examples/JDBC/JDBCDemo/pom.xml
index ca8cd24030c7dbfc23dba3caef99c051f8416dcf..fed00c147b87621c70d60ea206b06f1b0f3e8d8f 100644
--- a/tests/examples/JDBC/JDBCDemo/pom.xml
+++ b/tests/examples/JDBC/JDBCDemo/pom.xml
@@ -17,7 +17,7 @@
com.taosdata.jdbc
taos-jdbcdriver
- 2.0.30
+ 2.0.31
diff --git a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcDemo.java b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcDemo.java
index f256668dc6a3cd8ce7a2626be3d37a354919f955..d4ea5f919d2882e4f82b817380172eff20d7c611 100644
--- a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcDemo.java
+++ b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcDemo.java
@@ -43,6 +43,7 @@ public class JdbcDemo {
if (connection != null)
System.out.println("[ OK ] Connection established.");
} catch (ClassNotFoundException | SQLException e) {
+            System.out.println("[ ERROR! ] Connection establishment failed.");
e.printStackTrace();
}
}
@@ -68,7 +69,7 @@ public class JdbcDemo {
}
private void insert() {
- final String sql = "insert into " + dbName + "." + tbName + " (ts, temperature, humidity) values(now, 20.5, 34)";
+ final String sql = "insert into " + dbName + "." + tbName + " (ts, temperature, humidity) values(now, 20.5, 34)";
exuete(sql);
}
@@ -91,13 +92,15 @@ public class JdbcDemo {
/************************************************************************/
private void executeQuery(String sql) {
+ long start = System.currentTimeMillis();
try (Statement statement = connection.createStatement()) {
- long start = System.currentTimeMillis();
ResultSet resultSet = statement.executeQuery(sql);
long end = System.currentTimeMillis();
printSql(sql, true, (end - start));
printResult(resultSet);
} catch (SQLException e) {
+ long end = System.currentTimeMillis();
+ printSql(sql, false, (end - start));
e.printStackTrace();
}
}
@@ -120,12 +123,14 @@ public class JdbcDemo {
}
private void exuete(String sql) {
+ long start = System.currentTimeMillis();
try (Statement statement = connection.createStatement()) {
- long start = System.currentTimeMillis();
boolean execute = statement.execute(sql);
long end = System.currentTimeMillis();
- printSql(sql, execute, (end - start));
+ printSql(sql, true, (end - start));
} catch (SQLException e) {
+ long end = System.currentTimeMillis();
+ printSql(sql, false, (end - start));
e.printStackTrace();
}
@@ -137,4 +142,4 @@ public class JdbcDemo {
}
-}
+}
\ No newline at end of file
diff --git a/tests/examples/c/apitest.c b/tests/examples/c/apitest.c
index ac522d6151b005e51dee0dd6d352f1eeb5cb62a9..01169715f3e8b5b9d6e212b4b317ecca5fa4dbcd 100644
--- a/tests/examples/c/apitest.c
+++ b/tests/examples/c/apitest.c
@@ -359,7 +359,7 @@ void verify_prepare(TAOS* taos) {
v.v8 = (int64_t)(i * 8);
v.f4 = (float)(i * 40);
v.f8 = (double)(i * 80);
- for (int j = 0; j < sizeof(v.bin) - 1; ++j) {
+ for (int j = 0; j < sizeof(v.bin); ++j) {
v.bin[j] = (char)(i + '0');
}
@@ -556,7 +556,7 @@ void verify_prepare2(TAOS* taos) {
v.v8[i] = (int64_t)(i * 8);
v.f4[i] = (float)(i * 40);
v.f8[i] = (double)(i * 80);
- for (int j = 0; j < sizeof(v.bin[0]) - 1; ++j) {
+ for (int j = 0; j < sizeof(v.bin[0]); ++j) {
v.bin[i][j] = (char)(i + '0');
}
strcpy(v.blob[i], "一二三四五六七八九十");
@@ -808,7 +808,7 @@ void verify_prepare3(TAOS* taos) {
v.v8[i] = (int64_t)(i * 8);
v.f4[i] = (float)(i * 40);
v.f8[i] = (double)(i * 80);
- for (int j = 0; j < sizeof(v.bin[0]) - 1; ++j) {
+ for (int j = 0; j < sizeof(v.bin[0]); ++j) {
v.bin[i][j] = (char)(i + '0');
}
strcpy(v.blob[i], "一二三四五六七八九十");
@@ -954,7 +954,7 @@ int32_t verify_schema_less(TAOS* taos) {
result = taos_query(taos, "drop database if exists test;");
taos_free_result(result);
usleep(100000);
- result = taos_query(taos, "create database test precision 'us';");
+ result = taos_query(taos, "create database test precision 'us' update 1;");
taos_free_result(result);
usleep(100000);
@@ -963,6 +963,8 @@ int32_t verify_schema_less(TAOS* taos) {
taos_free_result(result);
usleep(100000);
+ int code = 0;
+
char* lines[] = {
"st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000ns",
"st,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64 1626006833640000000ns",
@@ -975,8 +977,8 @@ int32_t verify_schema_less(TAOS* taos) {
"stf,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin_stf\",c2=false,c5=5f64,c6=7u64 1626006933641000000ns"
};
- int code = 0;
code = taos_insert_lines(taos, lines , sizeof(lines)/sizeof(char*));
+
char* lines2[] = {
"stg,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000ns",
"stg,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64 1626006833640000000ns"
@@ -989,7 +991,27 @@ int32_t verify_schema_less(TAOS* taos) {
"sth,t1=4i64,t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin_stf\",c2=false,c5=5f64,c6=7u64 1626006933654ms"
};
code = taos_insert_lines(taos, lines3, 2);
- return code;
+
+ char* lines4[] = {
+ "st123456,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000ns",
+ "dgtyqodr,t2=5f64,t3=L\"ste\" c1=tRue,c2=4i64,c3=\"iam\" 1626056811823316532ns"
+ };
+ code = taos_insert_lines(taos, lines4, 2);
+
+ char* lines5[] = {
+ "zqlbgs,id=\"zqlbgs_39302_21680\",t0=f,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"binaryColValue\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000ns",
+ "zqlbgs,t9=f,id=\"zqlbgs_39302_21680\",t0=f,t1=127i8,t11=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\",t10=L\"ncharTagValue\" c10=f,c0=f,c1=127i8,c12=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"binaryColValue\",c8=L\"ncharColValue\",c9=7u64,c11=L\"ncharColValue\" 1626006833639000000ns"
+ };
+ code = taos_insert_lines(taos, &lines5[0], 1);
+ code = taos_insert_lines(taos, &lines5[1], 1);
+
+
+ char* lines6[] = {
+ "st123456,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000ns",
+ "dgtyqodr,t2=5f64,t3=L\"ste\" c1=tRue,c2=4i64,c3=\"iam\" 1626056811823316532ns"
+ };
+ code = taos_insert_lines(taos, lines6, 2);
+ return (code);
}
int main(int argc, char *argv[]) {
@@ -1009,7 +1031,7 @@ int main(int argc, char *argv[]) {
info = taos_get_client_info(taos);
printf("client info: %s\n", info);
- printf("************ verify shemaless *************\n");
+ printf("************ verify schema-less *************\n");
verify_schema_less(taos);
@@ -1027,14 +1049,12 @@ int main(int argc, char *argv[]) {
printf("************ verify prepare2 *************\n");
verify_prepare2(taos);
-
printf("************ verify prepare3 *************\n");
verify_prepare3(taos);
-
+
printf("************ verify stream *************\n");
verify_stream(taos);
printf("done\n");
-
taos_close(taos);
taos_cleanup();
}
diff --git a/tests/examples/c/schemaless.c b/tests/examples/c/schemaless.c
index d6450914dfc7d406febca1792a35bd677c7b185a..3ea199c9144950526e4bbf59b9356753e2a88da6 100644
--- a/tests/examples/c/schemaless.c
+++ b/tests/examples/c/schemaless.c
@@ -9,8 +9,8 @@
#include
int numSuperTables = 8;
-int numChildTables = 1024;
-int numRowsPerChildTable = 128;
+int numChildTables = 4;
+int numRowsPerChildTable = 2048;
void shuffle(char**lines, size_t n)
{
@@ -157,5 +157,45 @@ int main(int argc, char* argv[]) {
return -1;
}
+ //Duplicate key check;
+ char* lines_003_1[] = {
+ "std,id=\"std_3_1\",t1=4i64,Id=\"std\",t2=true c1=true 1626006834s"
+ };
+
+ code = taos_insert_lines(taos, lines_003_1 , sizeof(lines_003_1)/sizeof(char*));
+ if (0 == code) {
+ printf("taos_insert_lines() lines_003_1 return code:%d (%s)\n", code, (char*)tstrerror(code));
+ return -1;
+ }
+
+ char* lines_003_2[] = {
+ "std,id=\"std_3_2\",tag1=4i64,Tag2=true,tAg3=2,TaG2=\"dup!\" c1=true 1626006834s"
+ };
+
+ code = taos_insert_lines(taos, lines_003_2 , sizeof(lines_003_2)/sizeof(char*));
+ if (0 == code) {
+ printf("taos_insert_lines() lines_003_2 return code:%d (%s)\n", code, (char*)tstrerror(code));
+ return -1;
+ }
+
+ char* lines_003_3[] = {
+ "std,id=\"std_3_3\",tag1=4i64 field1=true,Field2=2,FIElD1=\"dup!\",fIeLd4=true 1626006834s"
+ };
+
+ code = taos_insert_lines(taos, lines_003_3 , sizeof(lines_003_3)/sizeof(char*));
+ if (0 == code) {
+ printf("taos_insert_lines() lines_003_3 return code:%d (%s)\n", code, (char*)tstrerror(code));
+ return -1;
+ }
+
+ char* lines_003_4[] = {
+ "std,id=\"std_3_4\",tag1=4i64,dupkey=4i16,tag2=T field1=true,dUpkEy=1e3f32,field2=\"1234\" 1626006834s"
+ };
+
+ code = taos_insert_lines(taos, lines_003_4 , sizeof(lines_003_4)/sizeof(char*));
+ if (0 == code) {
+ printf("taos_insert_lines() lines_003_4 return code:%d (%s)\n", code, (char*)tstrerror(code));
+ return -1;
+ }
return 0;
}
diff --git a/tests/mas/Jenkinsfile b/tests/mas/Jenkinsfile
index ae2286298f3e7de4e831451ccdeaef28a5d71a8e..84c2d8e4c2d63094812a0ebde7f5f3150f375ccf 100644
--- a/tests/mas/Jenkinsfile
+++ b/tests/mas/Jenkinsfile
@@ -1,5 +1,32 @@
def pre_test(){
+ sh '''
+ sudo rmtaos||echo 'no taosd installed'
+ '''
+ sh '''
+ cd ${WKC}
+ git reset --hard
+ git checkout $BRANCH_NAME
+ git pull
+ git submodule update
+ cd ${WK}
+ git reset --hard
+ git checkout $BRANCH_NAME
+ git pull
+ export TZ=Asia/Harbin
+ date
+ rm -rf ${WK}/debug
+ mkdir debug
+ cd debug
+ cmake .. > /dev/null
+ make > /dev/null
+ make install > /dev/null
+ pip3 install ${WKC}/src/connector/python/ || echo 0
+ '''
+ return 1
+}
+def pre_test_p(){
+
sh '''
sudo rmtaos||echo 'no taosd installed'
'''
@@ -39,7 +66,7 @@ pipeline {
stage('pytest') {
agent{label 'slam1'}
steps {
- pre_test()
+ pre_test_p()
sh '''
cd ${WKC}/tests
find pytest -name '*'sql|xargs rm -rf
@@ -92,7 +119,7 @@ pipeline {
}
sh'''
- systemctl start taosd
+ nohup taosd >/dev/null &
sleep 10
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
@@ -136,7 +163,7 @@ pipeline {
'''
}
sh '''
- systemctl stop taosd
+ pkill -9 taosd || echo 1
cd ${WKC}/tests
./test-all.sh b2
date
diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh
index 555abf186319bafe06f95ef77e892252a7998f18..b86e96d0bb40c43c7f39da9b372389caa4969c4e 100755
--- a/tests/pytest/fulltest.sh
+++ b/tests/pytest/fulltest.sh
@@ -44,6 +44,7 @@ python3 ./test.py -f table/del_stable.py
#stable
python3 ./test.py -f stable/insert.py
+python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJsonStmt.py
# tag
python3 ./test.py -f tag_lite/filter.py
@@ -162,7 +163,12 @@ python3 test.py -f tools/taosdemoTestSampleData.py
python3 test.py -f tools/taosdemoTestInterlace.py
python3 test.py -f tools/taosdemoTestQuery.py
-
+# nano support
+python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py
+python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py
+python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py
+python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertTime_step.py
+python3 test.py -f tools/taosdumpTestNanoSupport.py
# update
python3 ./test.py -f update/allow_update.py
@@ -194,6 +200,10 @@ python3 ./test.py -f perfbenchmark/bug3433.py
#python3 ./test.py -f perfbenchmark/bug3589.py
python3 ./test.py -f perfbenchmark/taosdemoInsert.py
+#taosdemo
+python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py
+python3 test.py -f tools/taosdemoAllTest/taosdemoTestQueryWithJson.py
+
#query
python3 ./test.py -f query/filter.py
python3 ./test.py -f query/filterCombo.py
@@ -234,6 +244,7 @@ python3 ./test.py -f query/queryStddevWithGroupby.py
python3 ./test.py -f query/querySecondtscolumnTowherenow.py
python3 ./test.py -f query/queryFilterTswithDateUnit.py
python3 ./test.py -f query/queryTscomputWithNow.py
+python3 ./test.py -f query/queryStableJoin.py
python3 ./test.py -f query/computeErrorinWhere.py
python3 ./test.py -f query/queryTsisNull.py
python3 ./test.py -f query/subqueryFilter.py
@@ -242,6 +253,7 @@ python3 ./test.py -f query/queryStateWindow.py
python3 ./test.py -f query/nestedQuery/queryWithOrderLimit.py
python3 ./test.py -f query/nestquery_last_row.py
python3 ./test.py -f query/queryCnameDisplay.py
+python3 ./test.py -f query/operator_cost.py
python3 test.py -f query/nestedQuery/queryWithSpread.py
#stream
@@ -353,15 +365,25 @@ python3 ./test.py -f alter/alter_debugFlag.py
python3 ./test.py -f query/queryBetweenAnd.py
python3 ./test.py -f tag_lite/alter_tag.py
-# python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py
-python3 test.py -f tools/taosdemoAllTest/taosdemoTestQueryWithJson.py
+
python3 test.py -f tools/taosdemoAllTest/TD-4985/query-limit-offset.py
+python3 test.py -f tools/taosdemoAllTest/TD-5213/insert4096columns_not_use_taosdemo.py
+python3 test.py -f tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py
python3 ./test.py -f tag_lite/drop_auto_create.py
python3 test.py -f insert/insert_before_use_db.py
python3 test.py -f alter/alter_keep.py
python3 test.py -f alter/alter_cacheLastRow.py
python3 ./test.py -f query/querySession.py
python3 test.py -f alter/alter_create_exception.py
-
python3 ./test.py -f insert/flushwhiledrop.py
+
#======================p4-end===============
+python3 test.py -f tools/taosdemoAllTest/pytest.py
+
+
+
+
+
+
+
+
diff --git a/tests/pytest/insert/line_insert.py b/tests/pytest/insert/line_insert.py
index ff3a32b0f79028ce4f612c12b41171a2bd45a765..53eaa55aa50a1369b4aff9c49421263788205038 100644
--- a/tests/pytest/insert/line_insert.py
+++ b/tests/pytest/insert/line_insert.py
@@ -77,6 +77,8 @@ class TDTestCase:
"sth,t1=4i64,t2=5f64,t4=5f64,ID=\"childtable\" c1=3i64,c3=L\"passitagin_stf\",c2=false,c5=5f64,c6=7u64 1626006933641ms",
"sth,t1=4i64,t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin_stf\",c2=false,c5=5f64,c6=7u64 1626006933654ms"
])
+ tdSql.execute('reset query cache')
+
tdSql.query('select tbname, * from sth')
tdSql.checkRows(2)
diff --git a/tests/pytest/insert/metadataUpdate.py b/tests/pytest/insert/metadataUpdate.py
index 1a960a20e60d6bbb25b08a47725b262e1b49da49..f996a707ffc8a98a49f4dc607cb19316d3f1085a 100644
--- a/tests/pytest/insert/metadataUpdate.py
+++ b/tests/pytest/insert/metadataUpdate.py
@@ -16,7 +16,6 @@ from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql
from util.dnodes import tdDnodes
-from multiprocessing import Process
import subprocess
class TDTestCase:
@@ -28,16 +27,6 @@ class TDTestCase:
self.tables = 10
self.rows = 1000
- def updateMetadata(self):
- self.host = "127.0.0.1"
- self.user = "root"
- self.password = "taosdata"
- self.config = tdDnodes.getSimCfgPath()
-
- self.conn = taos.connect(host = self.host, user = self.user, password = self.password, config = self.config)
- self.cursor = self.conn.cursor()
- self.cursor.execute("alter table db.tb add column col2 int")
- print("alter table done")
def deleteTableAndRecreate(self):
self.config = tdDnodes.getSimCfgPath()
@@ -68,11 +57,15 @@ class TDTestCase:
tdSql.query("select * from tb")
tdSql.checkRows(1)
- p = Process(target=self.updateMetadata, args=())
- p.start()
- p.join()
- p.terminate()
-
+ self.config = tdDnodes.getSimCfgPath()
+ command = ["taos", "-c", self.config, "-s", "alter table db.tb add column col2 int;"]
+ print("alter table db.tb add column col2 int;")
+ result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8")
+ if result.returncode == 0:
+ print("success:", result)
+ else:
+ print("error:", result)
+
tdSql.execute("insert into tb(ts, col1, col2) values(%d, 1, 2)" % (self.ts + 2))
print("==============step2")
diff --git a/tests/pytest/query/nestedQuery/queryWithOrderLimit.py b/tests/pytest/query/nestedQuery/queryWithOrderLimit.py
index 26eda1120b6026655add2bcf6c601bf8dd22c54a..692b5b7d364bee2164bda6707443b29c4cef4d14 100644
--- a/tests/pytest/query/nestedQuery/queryWithOrderLimit.py
+++ b/tests/pytest/query/nestedQuery/queryWithOrderLimit.py
@@ -70,6 +70,14 @@ class TDTestCase:
tdSql.query("select * from (select avg(value), sum(value) from st group by tbname slimit 5 soffset 7)")
tdSql.checkRows(3)
+ # https://jira.taosdata.com:18080/browse/TD-5497
+ tdSql.execute("create table tt(ts timestamp ,i int)")
+ tdSql.execute("insert into tt values(now, 11)(now + 1s, -12)")
+ tdSql.query("select * from (select max(i),0-min(i) from tt)")
+ tdSql.checkRows(1);
+ tdSql.checkData(0, 0, 11);
+ tdSql.checkData(0, 1, 12.0);
+
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
diff --git a/tests/pytest/query/operator.py b/tests/pytest/query/operator.py
new file mode 100644
index 0000000000000000000000000000000000000000..774a1e5f42403a6b5f67678e53be5e07beaccde2
--- /dev/null
+++ b/tests/pytest/query/operator.py
@@ -0,0 +1,536 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import tdLog
+from util.cases import tdCases
+from util.sql import tdSql
+import random
+import time
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ self.ts = 1600000000000
+ self.num = 10
+
+ def run(self):
+ tdSql.prepare()
+ # test case for https://jira.taosdata.com:18080/browse/TD-5074
+
+ startTime = time.time()
+
+ tdSql.execute('''create stable stable_1
+ (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint,
+ q_bool bool , q_binary binary(20) , q_nchar nchar(20) ,
+ q_float float , q_double double , q_ts timestamp)
+ tags(loc nchar(20) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint,
+ t_bool bool , t_binary binary(20) , t_nchar nchar(20) ,
+ t_float float , t_double double , t_ts timestamp);''')
+ tdSql.execute('''create stable stable_2
+ (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint,
+ q_bool bool , q_binary binary(20) , q_nchar nchar(20) ,
+ q_float float , q_double double , q_ts timestamp)
+ tags(loc nchar(20) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint,
+ t_bool bool , t_binary binary(20) , t_nchar nchar(20) ,
+ t_float float , t_double double , t_ts timestamp);''')
+ tdSql.execute('''create table table_0 using stable_1
+ tags('table_0' , '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' ,'0')''')
+ tdSql.execute('''create table table_1 using stable_1
+ tags('table_1' , '2147483647' , '9223372036854775807' , '32767' , '127' , 1 ,
+ 'binary1' , 'nchar1' , '1' , '11' , \'1999-09-09 09:09:09.090\')''')
+ tdSql.execute('''create table table_2 using stable_1
+ tags('table_2' , '-2147483647' , '-9223372036854775807' , '-32767' , '-127' , false ,
+ 'binary2' , 'nchar2nchar2' , '-2.2' , '-22.22' , \'2099-09-09 09:09:09.090\')''')
+ tdSql.execute('''create table table_3 using stable_1
+ tags('table_3' , '3' , '3' , '3' , '3' , true , 'binary3' , 'nchar3' , '33.33' , '3333.3333' , '0')''')
+ tdSql.execute('''create table table_4 using stable_1
+ tags('table_4' , '4' , '4' , '4' , '4' , false , 'binary4' , 'nchar4' , '-444.444' , '-444444.444444' , '0')''')
+ tdSql.execute('''create table table_5 using stable_1
+ tags('table_5' , '5' , '5' , '5' , '5' , true , 'binary5' , 'nchar5' , '5555.5555' , '55555555.55555555' , '0')''')
+ tdSql.execute('''create table table_21 using stable_2
+ tags('table_5' , '5' , '5' , '5' , '5' , true , 'binary5' , 'nchar5' , '5555.5555' , '55555555.55555555' , '0')''')
+ #regular table
+ tdSql.execute('''create table regular_table_1
+ (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint,
+ q_bool bool , q_binary binary(20) , q_nchar nchar(20) ,
+ q_float float , q_double double , q_ts timestamp) ;''')
+
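+ # write self.num rows into each child table; regular_table_1 receives six batches
+ # at offsets 0/100/200/300/400/500 ms, so it ends up with 6*self.num rows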
+ for i in range(self.num):
+ tdSql.execute('''insert into table_0 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)'''
+ % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i))
+ tdSql.execute('''insert into table_1 values(%d, %d, %d, %d, %d, 1, 'binary1.%s', 'nchar1.%s', %f, %f, %d)'''
+ % (self.ts + i, 2147483647-i, 9223372036854775807-i, 32767-i, 127-i,
+ i, i, random.random(), random.random(), 1262304000001 + i))
+ tdSql.execute('''insert into table_2 values(%d, %d, %d, %d, %d, true, 'binary2.%s', 'nchar2nchar2.%s', %f, %f, %d)'''
+ % (self.ts + i, -2147483647+i, -9223372036854775807+i, -32767+i, -127+i,
+ i, i, random.uniform(-1,0), random.uniform(-1,0), 1577836800001 + i))
+ tdSql.execute('''insert into table_3 values(%d, %d, %d, %d, %d, false, 'binary3.%s', 'nchar3.%s', %f, %f, %d)'''
+ % (self.ts + i, random.randint(-2147483647, 2147483647),
+ random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767),
+ random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000),
+ random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i))
+ tdSql.execute('''insert into table_4 values(%d, %d, %d, %d, %d, true, 'binary4.%s', 'nchar4.%s', %f, %f, %d)'''
+ % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i))
+ tdSql.execute('''insert into table_5 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)'''
+ % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i))
+ tdSql.execute('''insert into table_21 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)'''
+ % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i))
+
+ tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)'''
+ % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i))
+ tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, 1, 'binary1.%s', 'nchar1.%s', %f, %f, %d)'''
+ % (self.ts + 100 + i, 2147483647-i, 9223372036854775807-i, 32767-i, 127-i,
+ i, i, random.random(), random.random(), 1262304000001 + i))
+ tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, true, 'binary2.%s', 'nchar2nchar2.%s', %f, %f, %d)'''
+ % (self.ts + 200 + i, -2147483647+i, -9223372036854775807+i, -32767+i, -127+i,
+ i, i, random.uniform(-1,0), random.uniform(-1,0), 1577836800001 + i))
+ tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, false, 'binary3.%s', 'nchar3.%s', %f, %f, %d)'''
+ % (self.ts + 300 + i, random.randint(-2147483647, 2147483647),
+ random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767),
+ random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000),
+ random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i))
+ tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, true, 'binary4.%s', 'nchar4.%s', %f, %f, %d)'''
+ % (self.ts + 400 + i, i, i, i, i, i, i, i, i, self.ts + i))
+ tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)'''
+ % (self.ts + 500 + i, i, i, i, i, i, i, i, i, self.ts + i))
+
+ tdLog.info("========== operator=1(OP_TableScan) ==========")
+ tdLog.info("========== operator=7(OP_Project) ==========")
+ sql = '''select * from stable_1'''
+ tdSql.query(sql)
+ tdSql.checkRows(6*self.num)
+ sql = '''select * from regular_table_1'''
+ tdSql.query(sql)
+ tdSql.checkRows(6*self.num)
+
+ tdLog.info("========== operator=14(OP_MultiTableAggregate ) ==========")
+ sql = '''select last_row(*) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkData(0,1,self.num-1)
+
+ tdLog.info("========== operator=6(OP_Aggregate) ==========")
+ sql = '''select last_row(*) from regular_table_1;'''
+ tdSql.query(sql)
+ tdSql.checkData(0,1,self.num-1)
+
+ tdLog.info("========== operator=9(OP_Limit) ==========")
+ sql = '''select * from stable_1 where loc = 'table_0' limit 5;'''
+ tdSql.query(sql)
+ tdSql.checkRows(5)
+ sql = '''select last_row(*) from (select * from stable_1 where loc = 'table_0');'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+
+ sql = '''select * from regular_table_1 ;'''
+ tdSql.query(sql)
+ tdSql.checkRows(6*self.num)
+ sql = '''select last_row(*) from (select * from regular_table_1);'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ tdSql.checkData(0,1,self.num-1)
+
+
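+ # last_row over a union-all of subqueries is expected to be rejected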
+ sql = '''select last_row(*) from
+ ((select * from table_0) union all
+ (select * from table_1) union all
+ (select * from table_2));'''
+ tdSql.error(sql)
+
+ tdLog.info("========== operator=16(OP_DummyInput) ==========")
+ sql = '''select last_row(*) from
+ ((select last_row(*) from table_0) union all
+ (select last_row(*) from table_1) union all
+ (select last_row(*) from table_2));'''
+ tdSql.error(sql)
+
+ sql = '''select last_row(*) from
+ ((select * from table_0 limit 5 offset 5) union all
+ (select * from table_1 limit 5 offset 5) union all
+ (select * from regular_table_1 limit 5 offset 5));'''
+ tdSql.error(sql)
+
+ tdLog.info("========== operator=10(OP_SLimit) ==========")
+ sql = '''select count(*) from stable_1 group by loc slimit 3 soffset 2 ;'''
+ tdSql.query(sql)
+ tdSql.checkRows(3)
+
+ sql = '''select last_row(*) from
+ ((select * from table_0) union all
+ (select * from table_1) union all
+ (select * from table_2));'''
+ tdSql.error(sql)
+
+ tdLog.info("========== operator=20(OP_Distinct) ==========")
+ tdLog.info("========== operator=4(OP_TagScan) ==========")
+ sql = '''select distinct(t_bool) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(2)
+ sql = '''select distinct(loc) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(6)
+ sql = '''select distinct(t_int) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(6)
+ sql = '''select distinct(t_bigint) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(6)
+ sql = '''select distinct(t_smallint) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(6)
+ sql = '''select distinct(t_tinyint) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(6)
+ sql = '''select distinct(t_nchar) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(6)
+ sql = '''select distinct(t_float) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(6)
+ sql = '''select distinct(t_double) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(6)
+ sql = '''select distinct(t_ts) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(3)
+ sql = '''select distinct(tbname) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(6)
+
+ tdLog.info("========== operator=2(OP_DataBlocksOptScan) ==========")
+ sql = '''select last(q_int),first(q_int) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ sql = '''select last(q_bigint),first(q_bigint) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ sql = '''select last(q_smallint),first(q_smallint) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ sql = '''select last(q_tinyint),first(q_tinyint) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ sql = '''select last(q_bool),first(q_bool) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ sql = '''select last(q_binary),first(q_binary) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ sql = '''select last(q_nchar),first(q_nchar) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ sql = '''select last(q_float),first(q_float) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ sql = '''select last(q_double),first(q_double) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ sql = '''select last(q_ts),first(q_ts) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),last(q_bool),last(q_binary),last(q_nchar),
+ last(q_float),last(q_double),last(q_ts),first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint),
+ first(q_bool),first(q_binary),first(q_nchar),first(q_float),first(q_float),first(q_double),first(q_ts) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),last(q_bool),last(q_binary),last(q_nchar),
+ last(q_float),last(q_double),last(q_ts),first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint),first(q_bool),
+ first(q_binary),first(q_nchar),first(q_float),first(q_float),first(q_double),first(q_ts) from regular_table_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+
+ tdLog.info("========== operator=8(OP_Groupby) ==========")
+ sql = '''select stddev(q_int) from table_0 group by q_int;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from stable_1 group by q_int;'''
+ tdSql.query(sql)
+ sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from table_1 group by q_bigint;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from regular_table_1 group by q_smallint;'''
+ tdSql.query(sql)
+
+ tdLog.info("========== operator=11(OP_TimeWindow) ==========")
+ sql = '''select last(q_int) from table_0 interval(1m);'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),
+ first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from table_1 interval(1m);'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),
+ first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from stable_1 interval(1m);'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),
+ first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from regular_table_1 interval(1m);'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+
+ tdLog.info("========== operator=12(OP_SessionWindow) ==========")
+ sql = '''select count(*) from table_1 session(ts,1s);'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ sql = '''select count(*) from regular_table_1 session(ts,1s);'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ sql = '''select count(*),sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
+ sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
+ sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
+ sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
+ sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
+ sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
+ from table_1 session(ts,1s);'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ sql = '''select count(*),sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
+ sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
+ sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
+ sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
+ sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
+ sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
+ from regular_table_1 session(ts,1s);'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+
+ tdLog.info("========== operator=13(OP_Fill) ==========")
+ sql = '''select sum(q_int) from table_0
+ where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);'''
+ tdSql.query(sql)
+ tdSql.checkData(0,1,'None')
+ sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
+ sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
+ sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
+ sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
+ sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
+ sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
+ from stable_1 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);'''
+ tdSql.query(sql)
+ tdSql.checkData(0,1,'None')
+ sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
+ sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
+ sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
+ sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
+ sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
+ sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
+ from regular_table_1 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);'''
+ tdSql.query(sql)
+ tdSql.checkData(0,1,'None')
+ sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
+ sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
+ sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
+ sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
+ sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
+ sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
+ from table_0 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);'''
+ tdSql.query(sql)
+ tdSql.checkData(0,1,'None')
+ #TD-5190
+ sql = '''select sum(q_tinyint),stddev(q_float) from stable_1
+ where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);'''
+ tdSql.query(sql)
+ tdSql.checkData(0,1,'None')
+
+ tdLog.info("========== operator=15(OP_MultiTableTimeInterval) ==========")
+ sql = '''select avg(q_int) from stable_1 where ts=0;'''
+ tdSql.query(sql)
+ tdSql.checkData(0,0,'table_0')
+ sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
+ sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
+ sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
+ sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
+ sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
+ sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
+ from stable_1 group by loc having sum(q_int)>=0;'''
+ tdSql.query(sql)
+ tdSql.checkData(0,0,'table_0')
+ sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
+ sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
+ sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
+ sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
+ sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
+ sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
+ from stable_1 group by loc having avg(q_int)>=0;'''
+ tdSql.query(sql)
+ tdSql.checkData(0,0,'table_0')
+ sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
+ sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
+ sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
+ sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
+ sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
+ sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
+ from stable_1 group by loc having min(q_int)>=0;'''
+ tdSql.query(sql)
+ tdSql.checkData(0,0,'table_0')
+ sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
+ sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
+ sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
+ sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
+ sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
+ sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
+ from stable_1 group by loc having max(q_int)>=0;'''
+ tdSql.query(sql)
+ tdSql.checkData(0,0,'table_0')
+ sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
+ sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
+ sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
+ sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
+ sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
+ sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
+ from stable_1 group by loc having first(q_int)>=0;'''
+ tdSql.query(sql)
+ tdSql.checkData(0,0,'table_0')
+ sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
+ sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
+ sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
+ sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
+ sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
+ sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
+ from stable_1 group by loc having last(q_int)>=0;'''
+ tdSql.query(sql)
+ tdSql.checkData(0,0,'table_0')
+
+ tdLog.info("========== operator=21(OP_Join) ==========")
+ sql = '''select t1.q_int,t2.q_int from
+ (select ts,q_int from table_1) t1 , (select ts,q_int from table_2) t2
+ where t2.ts = t1.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select t1.*,t2.* from
+ (select * from table_1) t1 , (select * from table_2) t2
+ where t2.ts = t1.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select t1.*,t2.* from
+ (select * from regular_table_1) t1 , (select * from table_0) t2
+ where t2.ts = t1.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select t1.*,t2.* from
+ (select * from stable_1) t1 , (select * from table_2) t2
+ where t2.ts = t1.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select t1.*,t2.* from
+ (select * from regular_table_1) t1 , (select * from stable_1) t2
+ where t2.ts = t1.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select t1.*,t2.*,t3.* from
+ (select * from regular_table_1) t1 , (select * from stable_1) t2, (select * from table_0) t3
+ where t2.ts = t1.ts and t3.ts = t1.ts and t2.ts = t3.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+
+ tdLog.info("========== operator=22(OP_StateWindow) ==========")
+ sql = '''select avg(q_int),sum(q_smallint) from table_1 state_window(q_int);'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
+ sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
+ sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
+ sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
+ sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
+ sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
+ from table_1 state_window(q_bigint);'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
+ sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
+ sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
+ sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
+ sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
+ sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
+ from regular_table_1 state_window(q_smallint);'''
+ tdSql.query(sql)
+ tdSql.checkRows(6*self.num)
+
+ endTime = time.time()
+ print("total time %ds" % (endTime - startTime))
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
diff --git a/tests/pytest/query/operator_cost.py b/tests/pytest/query/operator_cost.py
new file mode 100644
index 0000000000000000000000000000000000000000..27de3531ebfe1e7be68ae95e8f0b134398bf4a43
--- /dev/null
+++ b/tests/pytest/query/operator_cost.py
@@ -0,0 +1,536 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import tdLog
+from util.cases import tdCases
+from util.sql import tdSql
+import random
+import time
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
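+ # base timestamp in milliseconds (2020-09-13 12:26:40 UTC) and rows written per table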
+ self.ts = 1600000000000
+ self.num = 10
+
+ def run(self):
+ tdSql.prepare()
+ # test case for https://jira.taosdata.com:18080/browse/TD-5074
+
+ startTime = time.time()
+
+ tdSql.execute('''create stable stable_1
+ (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint,
+ q_bool bool , q_binary binary(20) , q_nchar nchar(20) ,
+ q_float float , q_double double , q_ts timestamp)
+ tags(loc nchar(20) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint,
+ t_bool bool , t_binary binary(20) , t_nchar nchar(20) ,
+ t_float float , t_double double , t_ts timestamp);''')
+ tdSql.execute('''create stable stable_2
+ (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint,
+ q_bool bool , q_binary binary(20) , q_nchar nchar(20) ,
+ q_float float , q_double double , q_ts timestamp)
+ tags(loc nchar(20) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint,
+ t_bool bool , t_binary binary(20) , t_nchar nchar(20) ,
+ t_float float , t_double double , t_ts timestamp);''')
+ tdSql.execute('''create table table_0 using stable_1
+ tags('table_0' , '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' ,'0')''')
+ tdSql.execute('''create table table_1 using stable_1
+ tags('table_1' , '2147483647' , '9223372036854775807' , '32767' , '127' , 1 ,
+ 'binary1' , 'nchar1' , '1' , '11' , \'1999-09-09 09:09:09.090\')''')
+ tdSql.execute('''create table table_2 using stable_1
+ tags('table_2' , '-2147483647' , '-9223372036854775807' , '-32767' , '-127' , false ,
+ 'binary2' , 'nchar2nchar2' , '-2.2' , '-22.22' , \'2099-09-09 09:09:09.090\')''')
+ tdSql.execute('''create table table_3 using stable_1
+ tags('table_3' , '3' , '3' , '3' , '3' , true , 'binary3' , 'nchar3' , '33.33' , '3333.3333' , '0')''')
+ tdSql.execute('''create table table_4 using stable_1
+ tags('table_4' , '4' , '4' , '4' , '4' , false , 'binary4' , 'nchar4' , '-444.444' , '-444444.444444' , '0')''')
+ tdSql.execute('''create table table_5 using stable_1
+ tags('table_5' , '5' , '5' , '5' , '5' , true , 'binary5' , 'nchar5' , '5555.5555' , '55555555.55555555' , '0')''')
+ tdSql.execute('''create table table_21 using stable_2
+ tags('table_5' , '5' , '5' , '5' , '5' , true , 'binary5' , 'nchar5' , '5555.5555' , '55555555.55555555' , '0')''')
+ #regular table
+ tdSql.execute('''create table regular_table_1
+ (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint,
+ q_bool bool , q_binary binary(20) , q_nchar nchar(20) ,
+ q_float float , q_double double , q_ts timestamp) ;''')
+
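+ # write self.num rows into each child table; regular_table_1 receives six batches
+ # at offsets 0/100/200/300/400/500 ms, so it ends up with 6*self.num rows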
+ for i in range(self.num):
+ tdSql.execute('''insert into table_0 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)'''
+ % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i))
+ tdSql.execute('''insert into table_1 values(%d, %d, %d, %d, %d, 1, 'binary1.%s', 'nchar1.%s', %f, %f, %d)'''
+ % (self.ts + i, 2147483647-i, 9223372036854775807-i, 32767-i, 127-i,
+ i, i, random.random(), random.random(), 1262304000001 + i))
+ tdSql.execute('''insert into table_2 values(%d, %d, %d, %d, %d, true, 'binary2.%s', 'nchar2nchar2.%s', %f, %f, %d)'''
+ % (self.ts + i, -2147483647+i, -9223372036854775807+i, -32767+i, -127+i,
+ i, i, random.uniform(-1,0), random.uniform(-1,0), 1577836800001 + i))
+ tdSql.execute('''insert into table_3 values(%d, %d, %d, %d, %d, false, 'binary3.%s', 'nchar3.%s', %f, %f, %d)'''
+ % (self.ts + i, random.randint(-2147483647, 2147483647),
+ random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767),
+ random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000),
+ random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i))
+ tdSql.execute('''insert into table_4 values(%d, %d, %d, %d, %d, true, 'binary4.%s', 'nchar4.%s', %f, %f, %d)'''
+ % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i))
+ tdSql.execute('''insert into table_5 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)'''
+ % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i))
+ tdSql.execute('''insert into table_21 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)'''
+ % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i))
+
+ tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)'''
+ % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i))
+ tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, 1, 'binary1.%s', 'nchar1.%s', %f, %f, %d)'''
+ % (self.ts + 100 + i, 2147483647-i, 9223372036854775807-i, 32767-i, 127-i,
+ i, i, random.random(), random.random(), 1262304000001 + i))
+ tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, true, 'binary2.%s', 'nchar2nchar2.%s', %f, %f, %d)'''
+ % (self.ts + 200 + i, -2147483647+i, -9223372036854775807+i, -32767+i, -127+i,
+ i, i, random.uniform(-1,0), random.uniform(-1,0), 1577836800001 + i))
+ tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, false, 'binary3.%s', 'nchar3.%s', %f, %f, %d)'''
+ % (self.ts + 300 + i, random.randint(-2147483647, 2147483647),
+ random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767),
+ random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000),
+ random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i))
+ tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, true, 'binary4.%s', 'nchar4.%s', %f, %f, %d)'''
+ % (self.ts + 400 + i, i, i, i, i, i, i, i, i, self.ts + i))
+ tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)'''
+ % (self.ts + 500 + i, i, i, i, i, i, i, i, i, self.ts + i))
+
+ tdLog.info("========== operator=1(OP_TableScan) ==========")
+ tdLog.info("========== operator=7(OP_Project) ==========")
+ sql = '''select * from stable_1'''
+ tdSql.query(sql)
+ tdSql.checkRows(6*self.num)
+ sql = '''select * from regular_table_1'''
+ tdSql.query(sql)
+ tdSql.checkRows(6*self.num)
+
+ tdLog.info("========== operator=14(OP_MultiTableAggregate ) ==========")
+ sql = '''select last_row(*) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkData(0,1,self.num-1)
+
+ tdLog.info("========== operator=6(OP_Aggregate) ==========")
+ sql = '''select last_row(*) from regular_table_1;'''
+ tdSql.query(sql)
+ tdSql.checkData(0,1,self.num-1)
+
+ tdLog.info("========== operator=9(OP_Limit) ==========")
+ sql = '''select * from stable_1 where loc = 'table_0' limit 5;'''
+ tdSql.query(sql)
+ tdSql.checkRows(5)
+ sql = '''select last_row(*) from (select * from stable_1 where loc = 'table_0');'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+
+ sql = '''select * from regular_table_1 ;'''
+ tdSql.query(sql)
+ tdSql.checkRows(6*self.num)
+ sql = '''select last_row(*) from (select * from regular_table_1);'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ tdSql.checkData(0,1,self.num-1)
+
+
+ sql = '''select last_row(*) from
+ ((select * from table_0) union all
+ (select * from table_1) union all
+ (select * from table_2));'''
+ tdSql.error(sql)
+
+ tdLog.info("========== operator=16(OP_DummyInput) ==========")
+ sql = '''select last_row(*) from
+ ((select last_row(*) from table_0) union all
+ (select last_row(*) from table_1) union all
+ (select last_row(*) from table_2));'''
+ tdSql.error(sql)
+
+ sql = '''select last_row(*) from
+ ((select * from table_0 limit 5 offset 5) union all
+ (select * from table_1 limit 5 offset 5) union all
+ (select * from regular_table_1 limit 5 offset 5));'''
+ tdSql.error(sql)
+
+ tdLog.info("========== operator=10(OP_SLimit) ==========")
+ sql = '''select count(*) from stable_1 group by loc slimit 3 soffset 2 ;'''
+ tdSql.query(sql)
+ tdSql.checkRows(3)
+
+ sql = '''select last_row(*) from
+ ((select * from table_0) union all
+ (select * from table_1) union all
+ (select * from table_2));'''
+ tdSql.error(sql)
+
+ tdLog.info("========== operator=20(OP_Distinct) ==========")
+ tdLog.info("========== operator=4(OP_TagScan) ==========")
+ sql = '''select distinct(t_bool) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(2)
+ sql = '''select distinct(loc) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(6)
+ sql = '''select distinct(t_int) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(6)
+ sql = '''select distinct(t_bigint) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(6)
+ sql = '''select distinct(t_smallint) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(6)
+ sql = '''select distinct(t_tinyint) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(6)
+ sql = '''select distinct(t_nchar) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(6)
+ sql = '''select distinct(t_float) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(6)
+ sql = '''select distinct(t_double) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(6)
+ sql = '''select distinct(t_ts) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(3)
+ # sql = '''select distinct(tbname) from stable_1;'''
+ # tdSql.query(sql)
+ # tdSql.checkRows(6)
+
+ tdLog.info("========== operator=2(OP_DataBlocksOptScan) ==========")
+ sql = '''select last(q_int),first(q_int) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ sql = '''select last(q_bigint),first(q_bigint) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ sql = '''select last(q_smallint),first(q_smallint) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ sql = '''select last(q_tinyint),first(q_tinyint) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ sql = '''select last(q_bool),first(q_bool) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ sql = '''select last(q_binary),first(q_binary) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ sql = '''select last(q_nchar),first(q_nchar) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ sql = '''select last(q_float),first(q_float) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ sql = '''select last(q_double),first(q_double) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ sql = '''select last(q_ts),first(q_ts) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),last(q_bool),last(q_binary),last(q_nchar),
+ last(q_float),last(q_double),last(q_ts),first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint),
+ first(q_bool),first(q_binary),first(q_nchar),first(q_float),first(q_float),first(q_double),first(q_ts) from stable_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),last(q_bool),last(q_binary),last(q_nchar),
+ last(q_float),last(q_double),last(q_ts),first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint),first(q_bool),
+ first(q_binary),first(q_nchar),first(q_float),first(q_float),first(q_double),first(q_ts) from regular_table_1;'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+
+ tdLog.info("========== operator=8(OP_Groupby) ==========")
+ sql = '''select stddev(q_int) from table_0 group by q_int;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from stable_1 group by q_int;'''
+ tdSql.query(sql)
+ sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from table_1 group by q_bigint;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from regular_table_1 group by q_smallint;'''
+ tdSql.query(sql)
+
+ tdLog.info("========== operator=11(OP_TimeWindow) ==========")
+ sql = '''select last(q_int) from table_0 interval(1m);'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),
+ first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from table_1 interval(1m);'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),
+ first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from stable_1 interval(1m);'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),
+ first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from regular_table_1 interval(1m);'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+
+ tdLog.info("========== operator=12(OP_SessionWindow) ==========")
+ sql = '''select count(*) from table_1 session(ts,1s);'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ sql = '''select count(*) from regular_table_1 session(ts,1s);'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ sql = '''select count(*),sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
+ sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
+ sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
+ sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
+ sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
+ sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
+ from table_1 session(ts,1s);'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ sql = '''select count(*),sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
+ sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
+ sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
+ sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
+ sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
+ sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
+ from regular_table_1 session(ts,1s);'''
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+
+ tdLog.info("========== operator=13(OP_Fill) ==========")
+ sql = '''select sum(q_int) from table_0
+ where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);'''
+ tdSql.query(sql)
+ tdSql.checkData(0,1,'None')
+ sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
+ sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
+ sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
+ sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
+ sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
+ sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
+ from stable_1 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);'''
+ tdSql.query(sql)
+ tdSql.checkData(0,1,'None')
+ sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
+ sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
+ sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
+ sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
+ sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
+ sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
+ from regular_table_1 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);'''
+ tdSql.query(sql)
+ tdSql.checkData(0,1,'None')
+ sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
+ sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
+ sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
+ sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
+ sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
+ sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
+ from table_0 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);'''
+ tdSql.query(sql)
+ tdSql.checkData(0,1,'None')
+ #TD-5190
+ sql = '''select sum(q_tinyint),stddev(q_float) from stable_1
+ where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);'''
+ tdSql.query(sql)
+ tdSql.checkData(0,1,'None')
+
+ tdLog.info("========== operator=15(OP_MultiTableTimeInterval) ==========")
+ sql = '''select avg(q_int) from stable_1 where ts=0;'''
+ tdSql.query(sql)
+ tdSql.checkData(0,0,'table_0')
+ sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
+ sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
+ sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
+ sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
+ sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
+ sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
+ from stable_1 group by loc having sum(q_int)>=0;'''
+ tdSql.query(sql)
+ tdSql.checkData(0,0,'table_0')
+ sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
+ sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
+ sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
+ sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
+ sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
+ sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
+ from stable_1 group by loc having avg(q_int)>=0;'''
+ tdSql.query(sql)
+ tdSql.checkData(0,0,'table_0')
+ sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
+ sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
+ sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
+ sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
+ sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
+ sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
+ from stable_1 group by loc having min(q_int)>=0;'''
+ tdSql.query(sql)
+ tdSql.checkData(0,0,'table_0')
+ sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
+ sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
+ sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
+ sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
+ sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
+ sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
+ from stable_1 group by loc having max(q_int)>=0;'''
+ tdSql.query(sql)
+ tdSql.checkData(0,0,'table_0')
+ sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
+ sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
+ sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
+ sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
+ sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
+ sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
+ from stable_1 group by loc having first(q_int)>=0;'''
+ tdSql.query(sql)
+ tdSql.checkData(0,0,'table_0')
+ sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
+ sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
+ sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
+ sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
+ sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
+ sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
+ from stable_1 group by loc having last(q_int)>=0;'''
+ tdSql.query(sql)
+ tdSql.checkData(0,0,'table_0')
+
+ tdLog.info("========== operator=21(OP_Join) ==========")
+ sql = '''select t1.q_int,t2.q_int from
+ (select ts,q_int from table_1) t1 , (select ts,q_int from table_2) t2
+ where t2.ts = t1.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select t1.*,t2.* from
+ (select * from table_1) t1 , (select * from table_2) t2
+ where t2.ts = t1.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select t1.*,t2.* from
+ (select * from regular_table_1) t1 , (select * from table_0) t2
+ where t2.ts = t1.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select t1.*,t2.* from
+ (select * from stable_1) t1 , (select * from table_2) t2
+ where t2.ts = t1.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select t1.*,t2.* from
+ (select * from regular_table_1) t1 , (select * from stable_1) t2
+ where t2.ts = t1.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select t1.*,t2.*,t3.* from
+ (select * from regular_table_1) t1 , (select * from stable_1) t2, (select * from table_0) t3
+ where t2.ts = t1.ts and t3.ts = t1.ts and t2.ts = t3.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+
+ tdLog.info("========== operator=22(OP_StateWindow) ==========")
+ sql = '''select avg(q_int),sum(q_smallint) from table_1 state_window(q_int);'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
+ sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
+ sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
+ sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
+ sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
+ sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
+ from table_1 state_window(q_bigint);'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int),
+ sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint),
+ sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint),
+ sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint),
+ sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float),
+ sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double)
+ from regular_table_1 state_window(q_smallint);'''
+ tdSql.query(sql)
+ tdSql.checkRows(6*self.num)
+
+ endTime = time.time()
+ print("total time %ds" % (endTime - startTime))
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
diff --git a/tests/pytest/query/queryError.py b/tests/pytest/query/queryError.py
index 539ce5141fae9cfba7ba63b569179c0170ef428d..ac78c0518f36a847652499aaa5722d4585e42d44 100644
--- a/tests/pytest/query/queryError.py
+++ b/tests/pytest/query/queryError.py
@@ -51,7 +51,7 @@ class TDTestCase:
tdSql.error("select last_row as latest from st")
# query distinct on normal colnum
- tdSql.error("select distinct tagtype from st")
+ #tdSql.error("select distinct tagtype from st")
# query .. order by non-time field
tdSql.error("select * from st order by name")
diff --git a/tests/pytest/query/queryInterval.py b/tests/pytest/query/queryInterval.py
index d61e8cf288c97fc869f19cba6bd3d181dc60797c..129d3adc92255cfd8bb20f4622b23b2141824f88 100644
--- a/tests/pytest/query/queryInterval.py
+++ b/tests/pytest/query/queryInterval.py
@@ -73,6 +73,52 @@ class TDTestCase:
tdSql.checkData(6, 0, "2020-09-16 00:00:00")
tdSql.checkData(6, 1, 222.0)
+ # test case for https://jira.taosdata.com:18080/browse/TD-5338
+ tdSql.query("select loc,max(voltage) from st interval(1m);")
+ tdSql.checkRows(8)
+ tdSql.checkData(0, 0, "2020-07-01 04:24:00.000")
+ tdSql.checkData(0, 1, "beijing")
+ tdSql.checkData(0, 2, 220)
+ tdSql.checkData(1, 0, "2020-07-12 18:11:00.000")
+ tdSql.checkData(1, 1, "beijing")
+ tdSql.checkData(1, 2, 221)
+ tdSql.checkData(2, 0, "2020-07-24 07:58:00.000")
+ tdSql.checkData(2, 1, "beijing")
+ tdSql.checkData(2, 2, 225)
+ tdSql.checkData(3, 0, "2020-08-04 21:44:00.000")
+ tdSql.checkData(3, 1, "beijing")
+ tdSql.checkData(3, 2, 228)
+ tdSql.checkData(4, 0, "2020-08-16 11:31:00.000")
+ tdSql.checkData(4, 1, "shanghai")
+ tdSql.checkData(4, 2, 225)
+ tdSql.checkData(5, 0, "2020-08-28 01:18:00.000")
+ tdSql.checkData(5, 1, "shanghai")
+ tdSql.checkData(5, 2, 228)
+ tdSql.checkData(6, 0, "2020-09-08 15:04:00.000")
+ tdSql.checkData(6, 1, "beijing")
+ tdSql.checkData(6, 2, 222)
+ tdSql.checkData(7, 0, "2020-09-20 04:51:00.000")
+ tdSql.checkData(7, 1, "shanghai")
+ tdSql.checkData(7, 2, 222)
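+ # the same aggregation restricted to child table t0 returns only its five windows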
+ tdSql.query("select loc,max(voltage) from t0 interval(1m);")
+ tdSql.checkRows(5)
+ tdSql.checkData(0, 0, "2020-07-01 04:24:00.000")
+ tdSql.checkData(0, 1, "beijing")
+ tdSql.checkData(0, 2, 220)
+ tdSql.checkData(1, 0, "2020-07-12 18:11:00.000")
+ tdSql.checkData(1, 1, "beijing")
+ tdSql.checkData(1, 2, 221)
+ tdSql.checkData(2, 0, "2020-07-24 07:58:00.000")
+ tdSql.checkData(2, 1, "beijing")
+ tdSql.checkData(2, 2, 225)
+ tdSql.checkData(3, 0, "2020-08-04 21:44:00.000")
+ tdSql.checkData(3, 1, "beijing")
+ tdSql.checkData(3, 2, 228)
+ tdSql.checkData(4, 0, "2020-09-08 15:04:00.000")
+ tdSql.checkData(4, 1, "beijing")
+ tdSql.checkData(4, 2, 222)
+
+
# test case for https://jira.taosdata.com:18080/browse/TD-2298
tdSql.execute("create database test keep 36500")
tdSql.execute("use test")
diff --git a/tests/pytest/query/queryStableJoin.py b/tests/pytest/query/queryStableJoin.py
new file mode 100644
index 0000000000000000000000000000000000000000..825942bad8f867301281ffab46f1b7df20fa23f6
--- /dev/null
+++ b/tests/pytest/query/queryStableJoin.py
@@ -0,0 +1,300 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import tdLog
+from util.cases import tdCases
+from util.sql import tdSql
+import random
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ self.ts = 1600000000000
+ self.num = 10
+
+ def run(self):
+ tdSql.prepare()
+ # test case for https://jira.taosdata.com:18080/browse/TD-5206
+
+ tdSql.execute('''create stable stable_1
+ (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint,
+ q_bool bool , q_binary binary(20) , q_nchar nchar(20) ,q_float float , q_double double , q_ts timestamp)
+ tags(loc nchar(20) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint,
+ t_bool bool , t_binary binary(20) , t_nchar nchar(20) ,t_float float , t_double double );''')
+ tdSql.execute('''create stable stable_2
+ (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint,
+ q_bool bool , q_binary binary(20) , q_nchar nchar(20) ,q_float float , q_double double , q_ts timestamp)
+ tags(loc nchar(20) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint,
+ t_bool bool , t_binary binary(20) , t_nchar nchar(20) ,t_float float , t_double double );''')
+ tdSql.execute('''create table table_0 using stable_1
+ tags('table_0' , '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' )''')
+ tdSql.execute('''create table table_1 using stable_1
+ tags('table_1' , '2147483647' , '9223372036854775807' , '32767' , '127' , 1 ,
+ 'binary1' , 'nchar1' , '1' , '11' )''')
+ tdSql.execute('''create table table_2 using stable_1
+ tags('table_2' , '-2147483647' , '-9223372036854775807' , '-32767' , '-127' , false ,
+ 'binary2' , 'nchar2nchar2' , '-2.2' , '-22.22')''')
+ tdSql.execute('''create table table_3 using stable_1
+ tags('table_3' , '3' , '3' , '3' , '3' , true , 'binary3' , 'nchar3' , '33.33' , '3333.3333' )''')
+ tdSql.execute('''create table table_4 using stable_1
+ tags('table_4' , '4' , '4' , '4' , '4' , false , 'binary4' , 'nchar4' , '-444.444' , '-444444.444444' )''')
+ tdSql.execute('''create table table_5 using stable_1
+ tags('table_5' , '5' , '5' , '5' , '5' , true , 'binary5' , 'nchar5' , '5555.5555' , '55555555.55555555' )''')
+ tdSql.execute('''create table table_21 using stable_2
+ tags('table_5' , '5' , '5' , '5' , '5' , true , 'binary5' , 'nchar5' , '5555.5555' , '55555555.55555555' )''')
+
+ for i in range(self.num):
+ tdSql.execute('''insert into table_0 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)'''
+ % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i))
+ tdSql.execute('''insert into table_1 values(%d, %d, %d, %d, %d, 1, 'binary1.%s', 'nchar1.%s', %f, %f, %d)'''
+ % (self.ts + i, 2147483647-i, 9223372036854775807-i, 32767-i, 127-i,
+ i, i, random.random(), random.random(), 1262304000001 + i))
+ tdSql.execute('''insert into table_2 values(%d, %d, %d, %d, %d, true, 'binary2.%s', 'nchar2nchar2.%s', %f, %f, %d)'''
+ % (self.ts + i, -2147483647+i, -9223372036854775807+i, -32767+i, -127+i,
+ i, i, random.uniform(-1,0), random.uniform(-1,0), 1577836800001 + i))
+ tdSql.execute('''insert into table_3 values(%d, %d, %d, %d, %d, false, 'binary3.%s', 'nchar3.%s', %f, %f, %d)'''
+ % (self.ts + i, random.randint(-2147483647, 2147483647),
+ random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767),
+ random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000),
+ random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i))
+ tdSql.execute('''insert into table_4 values(%d, %d, %d, %d, %d, true, 'binary4.%s', 'nchar4.%s', %f, %f, %d)'''
+ % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i))
+ tdSql.execute('''insert into table_5 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)'''
+ % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i))
+ tdSql.execute('''insert into table_21 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)'''
+ % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i))
+
+
+ tdLog.info("==========TEST1:test all table data==========")
+ sql = '''select * from stable_1,stable_2 where stable_1.t_nchar = stable_2.t_nchar and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_tinyint = stable_2.t_tinyint and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_binary = stable_2.t_binary and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_double = stable_2.t_double and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_smallint = stable_2.t_smallint and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_bigint = stable_2.t_bigint and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_int = stable_2.t_int and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_float = stable_2.t_float and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_bool = stable_2.t_bool and stable_1.ts = stable_2.ts;'''
+ tdSql.error(sql)
+
+ tdLog.info("==========TEST1:test drop table_0 data==========")
+ sql = '''drop table table_0;'''
+ tdSql.execute(sql)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_nchar = stable_2.t_nchar and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_tinyint = stable_2.t_tinyint and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_binary = stable_2.t_binary and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_double = stable_2.t_double and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_smallint = stable_2.t_smallint and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_bigint = stable_2.t_bigint and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_int = stable_2.t_int and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_float = stable_2.t_float and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_bool = stable_2.t_bool and stable_1.ts = stable_2.ts;'''
+ tdSql.error(sql)
+
+ tdLog.info("==========TEST1:test drop table_1 data==========")
+ sql = '''drop table table_1;'''
+ tdSql.execute(sql)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_nchar = stable_2.t_nchar and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_tinyint = stable_2.t_tinyint and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_binary = stable_2.t_binary and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_double = stable_2.t_double and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_smallint = stable_2.t_smallint and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_bigint = stable_2.t_bigint and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_int = stable_2.t_int and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_float = stable_2.t_float and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_bool = stable_2.t_bool and stable_1.ts = stable_2.ts;'''
+ tdSql.error(sql)
+
+ tdLog.info("==========TEST1:test drop table_2 data==========")
+ sql = '''drop table table_2;'''
+ tdSql.execute(sql)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_nchar = stable_2.t_nchar and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_tinyint = stable_2.t_tinyint and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_binary = stable_2.t_binary and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_double = stable_2.t_double and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_smallint = stable_2.t_smallint and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_bigint = stable_2.t_bigint and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_int = stable_2.t_int and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_float = stable_2.t_float and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_bool = stable_2.t_bool and stable_1.ts = stable_2.ts;'''
+ tdSql.error(sql)
+
+ tdLog.info("==========TEST1:test drop table_3 data==========")
+ sql = '''drop table table_3;'''
+ tdSql.execute(sql)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_nchar = stable_2.t_nchar and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_tinyint = stable_2.t_tinyint and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_binary = stable_2.t_binary and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_double = stable_2.t_double and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_smallint = stable_2.t_smallint and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_bigint = stable_2.t_bigint and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_int = stable_2.t_int and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_float = stable_2.t_float and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_bool = stable_2.t_bool and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+
+ tdLog.info("==========TEST1:test drop table_4 data==========")
+ sql = '''drop table table_4;'''
+ tdSql.execute(sql)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_nchar = stable_2.t_nchar and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_tinyint = stable_2.t_tinyint and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_binary = stable_2.t_binary and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_double = stable_2.t_double and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_smallint = stable_2.t_smallint and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_bigint = stable_2.t_bigint and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_int = stable_2.t_int and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_float = stable_2.t_float and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_bool = stable_2.t_bool and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(self.num)
+
+ tdLog.info("==========TEST1:test drop table_5 data==========")
+ sql = '''drop table table_5;'''
+ tdSql.execute(sql)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_nchar = stable_2.t_nchar and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(0)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_tinyint = stable_2.t_tinyint and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(0)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_binary = stable_2.t_binary and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(0)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_double = stable_2.t_double and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(0)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_smallint = stable_2.t_smallint and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(0)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_bigint = stable_2.t_bigint and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(0)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_int = stable_2.t_int and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(0)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_float = stable_2.t_float and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(0)
+ sql = '''select * from stable_1,stable_2 where stable_1.t_bool = stable_2.t_bool and stable_1.ts = stable_2.ts;'''
+ tdSql.query(sql)
+ tdSql.checkRows(0)
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.py b/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.py
index 4edef88cf182eee88e42615fb007bbe4756f0c7c..7f551bcefd152007ebab7a1bc7d110468b69115a 100644
--- a/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.py
+++ b/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.py
@@ -176,12 +176,8 @@ class TDTestCase:
tdSql.checkData(0, 1, 5)
tdSql.checkData(1, 1, 6)
tdSql.checkData(2, 1, 7)
-
-
os.system("rm -rf tools/taosdemoAllTest/TD-4985/query-limit-offset.py.sql")
-
-
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
diff --git a/tests/pytest/tools/taosdemoAllTest/TD-5213/insert4096columns_not_use_taosdemo.py b/tests/pytest/tools/taosdemoAllTest/TD-5213/insert4096columns_not_use_taosdemo.py
new file mode 100644
index 0000000000000000000000000000000000000000..dfa829866d945b06d232aeeaba266b11ae229234
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/TD-5213/insert4096columns_not_use_taosdemo.py
@@ -0,0 +1,703 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import random
+import string
+import os
+import time
+from util.log import tdLog
+from util.cases import tdCases
+from util.sql import tdSql
+from util.dnodes import tdDnodes
+
+class TDTestCase:
+ updatecfgDict={'maxSQLLength':1048576}
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ self.ts = 1538548685000
+ self.num = 100
+
+ def get_random_string(self, length):
+ letters = string.ascii_lowercase
+ result_str = ''.join(random.choice(letters) for i in range(length))
+ return result_str
+
+ def run(self):
+ tdSql.prepare()
+ # test case for https://jira.taosdata.com:18080/browse/TD-5213
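+        # overall plan: build regular tables and super tables at the 4096-column limit,
+        # then verify inserts, column-list inserts, the column-count boundary and the
+        # TSDB_MAX_BYTES_PER_ROW (49151) row-width boundary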
+
+ print("==============step1, regular table, 1 ts + 4094 cols + 1 binary==============")
+ startTime = time.time()
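+        # build regular_table_1 at the column limit: 1 ts + 4094 int columns + 1 binary(22) = 4096 columns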
+ sql = "create table regular_table_1(ts timestamp, "
+ for i in range(4094):
+ sql += "col%d int, " % (i + 1)
+ sql += "col4095 binary(22))"
+ tdLog.info(len(sql))
+ tdSql.execute(sql)
+
+ for i in range(self.num):
+ sql = "insert into regular_table_1 values(%d, "
+ for j in range(4094):
+ str = "'%s', " % random.randint(0,1000)
+ sql += str
+ sql += "'%s')" % self.get_random_string(22)
+ tdSql.execute(sql % (self.ts + i))
+ time.sleep(1)
+ tdSql.query("select count(*) from regular_table_1")
+ tdSql.checkData(0, 0, self.num)
+ tdSql.query("select * from regular_table_1")
+ tdSql.checkRows(self.num)
+ tdSql.checkCols(4096)
+
+ endTime = time.time()
+ print("total time %ds" % (endTime - startTime))
+
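+        # the next two cases insert through an explicit column list:
+        # "in order" names the columns in schema order, "out of order" does not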
+ #insert in order
+ tdLog.info('test insert in order')
+ for i in range(self.num):
+ sql = "insert into regular_table_1 (ts,col1,col2,col3,col4,col5,col6,col7,col8,col9,col10,col4095) values(%d, "
+ for j in range(10):
+ str = "'%s', " % random.randint(0,1000)
+ sql += str
+ sql += "'%s')" % self.get_random_string(22)
+ tdSql.execute(sql % (self.ts + i + 1000))
+ time.sleep(1)
+ tdSql.query("select count(*) from regular_table_1")
+ tdSql.checkData(0, 0, 2*self.num)
+ tdSql.query("select * from regular_table_1")
+ tdSql.checkRows(2*self.num)
+ tdSql.checkCols(4096)
+
+ #insert out of order
+ tdLog.info('test insert out of order')
+ for i in range(self.num):
+ sql = "insert into regular_table_1 (ts,col123,col2213,col331,col41,col523,col236,col71,col813,col912,col1320,col4095) values(%d, "
+ for j in range(10):
+ str = "'%s', " % random.randint(0,1000)
+ sql += str
+ sql += "'%s')" % self.get_random_string(22)
+ tdSql.execute(sql % (self.ts + i + 2000))
+ time.sleep(1)
+ tdSql.query("select count(*) from regular_table_1")
+ tdSql.checkData(0, 0, 3*self.num)
+ tdSql.query("select * from regular_table_1")
+ tdSql.checkRows(3*self.num)
+ tdSql.checkCols(4096)
+
+
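+        # step2: negative cases - creating a 4097-column table, inserting too many or too
+        # few values, and adding one more column must all fail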
+ print("==============step2,regular table error col or value==============")
+ tdLog.info('test regular table exceeds row num')
+ # column > 4096
+ sql = "create table regular_table_2(ts timestamp, "
+ for i in range(4095):
+ sql += "col%d int, " % (i + 1)
+ sql += "col4096 binary(22))"
+ tdLog.info(len(sql))
+ tdSql.error(sql)
+
+ # column > 4096
+ sql = "insert into regular_table_1 values(%d, "
+ for j in range(4095):
+ str = "'%s', " % random.randint(0,1000)
+ sql += str
+ sql += "'%s')" % self.get_random_string(22)
+        tdSql.error(sql % (self.ts + self.num))
+
+ # insert column < 4096
+ sql = "insert into regular_table_1 values(%d, "
+ for j in range(4092):
+ str = "'%s', " % random.randint(0,1000)
+ sql += str
+ sql += "'%s')" % self.get_random_string(22)
+        tdSql.error(sql % (self.ts + self.num))
+
+ # alter column > 4096
+ sql = "alter table regular_table_1 add column max int; "
+ tdSql.error(sql)
+
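+        # step3: same 4096-column layout but with mixed column types (int/bigint, then all types)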
+ print("==============step3,regular table , mix data type==============")
+ startTime = time.time()
+ sql = "create table regular_table_3(ts timestamp, "
+ for i in range(2000):
+ sql += "col%d int, " % (i + 1)
+ for i in range(2000,4094):
+ sql += "col%d bigint, " % (i + 1)
+ sql += "col4095 binary(22))"
+ tdLog.info(len(sql))
+ tdSql.execute(sql)
+
+ for i in range(self.num):
+ sql = "insert into regular_table_3 values(%d, "
+ for j in range(4094):
+ str = "'%s', " % random.randint(0,1000)
+ sql += str
+ sql += "'%s')" % self.get_random_string(22)
+ tdSql.execute(sql % (self.ts + i))
+ time.sleep(1)
+ tdSql.query("select count(*) from regular_table_3")
+ tdSql.checkData(0, 0, self.num)
+ tdSql.query("select * from regular_table_3")
+ tdSql.checkRows(self.num)
+ tdSql.checkCols(4096)
+
+ endTime = time.time()
+ print("total time %ds" % (endTime - startTime))
+
+ sql = "create table regular_table_4(ts timestamp, "
+ for i in range(500):
+ sql += "int_%d int, " % (i + 1)
+ for i in range(500,1000):
+ sql += "smallint_%d smallint, " % (i + 1)
+ for i in range(1000,1500):
+ sql += "tinyint_%d tinyint, " % (i + 1)
+ for i in range(1500,2000):
+ sql += "double_%d double, " % (i + 1)
+ for i in range(2000,2500):
+ sql += "float_%d float, " % (i + 1)
+ for i in range(2500,3000):
+ sql += "bool_%d bool, " % (i + 1)
+ for i in range(3000,3500):
+ sql += "bigint_%d bigint, " % (i + 1)
+ for i in range(3500,3800):
+ sql += "nchar_%d nchar(4), " % (i + 1)
+ for i in range(3800,4090):
+ sql += "binary_%d binary(10), " % (i + 1)
+ for i in range(4090,4094):
+ sql += "timestamp_%d timestamp, " % (i + 1)
+ sql += "col4095 binary(22))"
+ tdLog.info(len(sql))
+ tdSql.execute(sql)
+
+ for i in range(self.num):
+ sql = "insert into regular_table_4 values(%d, "
+ for j in range(500):
+ str = "'%s', " % random.randint(-2147483647,2147483647)
+ sql += str
+ for j in range(500,1000):
+ str = "'%s', " % random.randint(-32767,32767 )
+ sql += str
+ for j in range(1000,1500):
+ str = "'%s', " % random.randint(-127,127)
+ sql += str
+ for j in range(1500,2000):
+ str = "'%s', " % random.randint(-922337203685477580700,922337203685477580700)
+ sql += str
+ for j in range(2000,2500):
+ str = "'%s', " % random.randint(-92233720368547758070,92233720368547758070)
+ sql += str
+ for j in range(2500,3000):
+ str = "'%s', " % random.choice(['true','false'])
+ sql += str
+ for j in range(3000,3500):
+ str = "'%s', " % random.randint(-9223372036854775807,9223372036854775807)
+ sql += str
+ for j in range(3500,3800):
+ str = "'%s', " % self.get_random_string(4)
+ sql += str
+ for j in range(3800,4090):
+ str = "'%s', " % self.get_random_string(10)
+ sql += str
+ for j in range(4090,4094):
+ str = "%s, " % (self.ts + j)
+ sql += str
+ sql += "'%s')" % self.get_random_string(22)
+ tdSql.execute(sql % (self.ts + i))
+ time.sleep(1)
+ tdSql.query("select count(*) from regular_table_4")
+ tdSql.checkData(0, 0, self.num)
+ tdSql.query("select * from regular_table_4")
+ tdSql.checkRows(self.num)
+ tdSql.checkCols(4096)
+ tdLog.info("end ,now new one")
+
+ #insert null value
+ tdLog.info('test insert null value')
+ for i in range(self.num):
+ sql = "insert into regular_table_4 values(%d, "
+ for j in range(2500):
+ str = "'%s', " % random.choice(['NULL' ,'NULL' ,'NULL' ,1 , 10 ,100 ,-100 ,-10, 88 ,66 ,'NULL' ,'NULL' ,'NULL' ])
+ sql += str
+ for j in range(2500,3000):
+ str = "'%s', " % random.choice(['true' ,'false'])
+ sql += str
+ for j in range(3000,3500):
+ str = "'%s', " % random.randint(-9223372036854775807,9223372036854775807)
+ sql += str
+ for j in range(3500,3800):
+ str = "'%s', " % self.get_random_string(4)
+ sql += str
+ for j in range(3800,4090):
+ str = "'%s', " % self.get_random_string(10)
+ sql += str
+ for j in range(4090,4094):
+ str = "%s, " % (self.ts + j)
+ sql += str
+ sql += "'%s')" % self.get_random_string(22)
+ tdSql.execute(sql % (self.ts + i + 10000))
+ time.sleep(1)
+ tdSql.query("select count(*) from regular_table_4")
+ tdSql.checkData(0, 0, 2*self.num)
+ tdSql.query("select * from regular_table_4")
+ tdSql.checkRows(2*self.num)
+ tdSql.checkCols(4096)
+
+ #insert in order
+ tdLog.info('test insert in order')
+ for i in range(self.num):
+ sql = "insert into regular_table_4 (ts,int_2,int_22,int_169,smallint_537,smallint_607,tinyint_1030,tinyint_1491,double_1629,double_1808,float_2075,col4095) values(%d, "
+ for j in range(10):
+ str = "'%s', " % random.randint(0,100)
+ sql += str
+ sql += "'%s')" % self.get_random_string(22)
+ tdSql.execute(sql % (self.ts + i + 1000))
+ time.sleep(1)
+ tdSql.query("select count(*) from regular_table_4")
+ tdSql.checkData(0, 0, 3*self.num)
+ tdSql.query("select * from regular_table_4")
+ tdSql.checkRows(3*self.num)
+ tdSql.checkCols(4096)
+
+ #insert out of order
+ tdLog.info('test insert out of order')
+ for i in range(self.num):
+ sql = "insert into regular_table_4 (ts,int_169,float_2075,int_369,tinyint_1491,tinyint_1030,float_2360,smallint_537,double_1808,double_1608,double_1629,col4095) values(%d, "
+ for j in range(10):
+ str = "'%s', " % random.randint(0,100)
+ sql += str
+ sql += "'%s')" % self.get_random_string(22)
+ tdSql.execute(sql % (self.ts + i + 2000))
+ time.sleep(1)
+ tdSql.query("select count(*) from regular_table_4")
+ tdSql.checkData(0, 0, 4*self.num)
+ tdSql.query("select * from regular_table_4")
+ tdSql.checkRows(4*self.num)
+ tdSql.checkCols(4096)
+
+        # define TSDB_MAX_BYTES_PER_ROW 49151 (previously 1024, then 16384)
+        # bytes per column: ts 8, int 4, smallint 2, bigint 8, bool 1, float 4, tinyint 1,
+        # nchar 4*len+2 (offset), binary len+2 (offset)
+ tdLog.info('test regular_table max bytes per row 49151')
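+        # row width of regular_table_5:
+        #   8 (ts) + 500*4 + 500*2 + 500*1 + 500*8 + 500*4 + 500*1 + 500*8
+        #   + 300*(20*4+2) + 290*(34+2) + 4*8 + (69+2) = 49151 bytes, exactly TSDB_MAX_BYTES_PER_ROW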
+ sql = "create table regular_table_5(ts timestamp, "
+ for i in range(500):
+ sql += "int_%d int, " % (i + 1)
+ for i in range(500,1000):
+ sql += "smallint_%d smallint, " % (i + 1)
+ for i in range(1000,1500):
+ sql += "tinyint_%d tinyint, " % (i + 1)
+ for i in range(1500,2000):
+ sql += "double_%d double, " % (i + 1)
+ for i in range(2000,2500):
+ sql += "float_%d float, " % (i + 1)
+ for i in range(2500,3000):
+ sql += "bool_%d bool, " % (i + 1)
+ for i in range(3000,3500):
+ sql += "bigint_%d bigint, " % (i + 1)
+ for i in range(3500,3800):
+ sql += "nchar_%d nchar(20), " % (i + 1)
+ for i in range(3800,4090):
+ sql += "binary_%d binary(34), " % (i + 1)
+ for i in range(4090,4094):
+ sql += "timestamp_%d timestamp, " % (i + 1)
+ sql += "col4095 binary(69))"
+ tdSql.execute(sql)
+ tdSql.query("select * from regular_table_5")
+ tdSql.checkCols(4096)
+ # TD-5324
+ sql = "alter table regular_table_5 modify column col4095 binary(70); "
+ tdSql.error(sql)
+
+ # drop and add
+ sql = "alter table regular_table_5 drop column col4095; "
+ tdSql.execute(sql)
+ sql = "select * from regular_table_5; "
+ tdSql.query(sql)
+ tdSql.checkCols(4095)
+ sql = "alter table regular_table_5 add column col4095 binary(70); "
+ tdSql.error(sql)
+ sql = "alter table regular_table_5 add column col4095 binary(69); "
+ tdSql.execute(sql)
+ sql = "select * from regular_table_5; "
+ tdSql.query(sql)
+ tdSql.checkCols(4096)
+
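+        # regular_table_6 uses the same layout with col4095 binary(70), one byte over the
+        # 49151 limit, so the create statement must fail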
+ #out TSDB_MAX_BYTES_PER_ROW 49151
+ tdLog.info('test regular_table max bytes per row out 49151')
+ sql = "create table regular_table_6(ts timestamp, "
+ for i in range(500):
+ sql += "int_%d int, " % (i + 1)
+ for i in range(500,1000):
+ sql += "smallint_%d smallint, " % (i + 1)
+ for i in range(1000,1500):
+ sql += "tinyint_%d tinyint, " % (i + 1)
+ for i in range(1500,2000):
+ sql += "double_%d double, " % (i + 1)
+ for i in range(2000,2500):
+ sql += "float_%d float, " % (i + 1)
+ for i in range(2500,3000):
+ sql += "bool_%d bool, " % (i + 1)
+ for i in range(3000,3500):
+ sql += "bigint_%d bigint, " % (i + 1)
+ for i in range(3500,3800):
+ sql += "nchar_%d nchar(20), " % (i + 1)
+ for i in range(3800,4090):
+ sql += "binary_%d binary(34), " % (i + 1)
+ for i in range(4090,4094):
+ sql += "timestamp_%d timestamp, " % (i + 1)
+ sql += "col4095 binary(70))"
+ tdLog.info(len(sql))
+ tdSql.error(sql)
+
+
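+        # step4: super table at the limit - 1 ts + 4090 int columns + 1 binary(22) plus 4 tags = 4096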
+ print("==============step4, super table , 1 ts + 4090 cols + 4 tags ==============")
+ startTime = time.time()
+ sql = "create stable stable_1(ts timestamp, "
+ for i in range(4090):
+ sql += "col%d int, " % (i + 1)
+ sql += "col4091 binary(22))"
+ sql += " tags (loc nchar(10),tag_1 int,tag_2 int,tag_3 int) "
+ tdLog.info(len(sql))
+ tdSql.execute(sql)
+ sql = '''create table table_0 using stable_1
+ tags('table_0' , '1' , '2' , '3' );'''
+ tdSql.execute(sql)
+
+ for i in range(self.num):
+ sql = "insert into table_0 values(%d, "
+ for j in range(4090):
+ str = "'%s', " % random.randint(0,1000)
+ sql += str
+ sql += "'%s')" % self.get_random_string(22)
+ tdSql.execute(sql % (self.ts + i))
+ time.sleep(1)
+ tdSql.query("select count(*) from table_0")
+ tdSql.checkData(0, 0, self.num)
+ tdSql.query("select * from table_0")
+ tdSql.checkRows(self.num)
+ tdSql.checkCols(4092)
+
+ sql = '''create table table_1 using stable_1
+ tags('table_1' , '1' , '2' , '3' );'''
+ tdSql.execute(sql)
+
+ for i in range(self.num):
+ sql = "insert into table_1 values(%d, "
+ for j in range(2080):
+ sql += "'%d', " % random.randint(0,1000)
+ for j in range(2080,4080):
+ sql += "'%s', " % 'NULL'
+ for j in range(4080,4090):
+ sql += "'%s', " % random.randint(0,10000)
+ sql += "'%s')" % self.get_random_string(22)
+ tdSql.execute(sql % (self.ts + i))
+ time.sleep(1)
+ tdSql.query("select count(*) from table_1")
+ tdSql.checkData(0, 0, self.num)
+ tdSql.query("select * from table_1")
+ tdSql.checkRows(self.num)
+ tdSql.checkCols(4092)
+
+ endTime = time.time()
+ print("total time %ds" % (endTime - startTime))
+
+ #insert in order
+ tdLog.info('test insert in order')
+ for i in range(self.num):
+ sql = "insert into table_1 (ts,col1,col2,col3,col4,col5,col6,col7,col8,col9,col10,col4091) values(%d, "
+ for j in range(10):
+ str = "'%s', " % random.randint(0,1000)
+ sql += str
+ sql += "'%s')" % self.get_random_string(22)
+ tdSql.execute(sql % (self.ts + i + 1000))
+ time.sleep(1)
+ tdSql.query("select count(*) from table_1")
+ tdSql.checkData(0, 0, 2*self.num)
+ tdSql.query("select * from table_1")
+ tdSql.checkRows(2*self.num)
+ tdSql.checkCols(4092)
+
+ #insert out of order
+ tdLog.info('test insert out of order')
+ for i in range(self.num):
+ sql = "insert into table_1 (ts,col123,col2213,col331,col41,col523,col236,col71,col813,col912,col1320,col4091) values(%d, "
+ for j in range(10):
+ str = "'%s', " % random.randint(0,1000)
+ sql += str
+ sql += "'%s')" % self.get_random_string(22)
+ tdSql.execute(sql % (self.ts + i + 2000))
+ time.sleep(1)
+ tdSql.query("select count(*) from table_1")
+ tdSql.checkData(0, 0, 3*self.num)
+ tdSql.query("select * from table_1")
+ tdSql.checkRows(3*self.num)
+ tdSql.checkCols(4092)
+
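+        # step5: repeat the mixed-type layout on a super table (4092 columns + 4 tags = 4096)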
+ print("==============step5,stable table , mix data type==============")
+ sql = "create stable stable_3(ts timestamp, "
+ for i in range(500):
+ sql += "int_%d int, " % (i + 1)
+ for i in range(500,1000):
+ sql += "smallint_%d smallint, " % (i + 1)
+ for i in range(1000,1500):
+ sql += "tinyint_%d tinyint, " % (i + 1)
+ for i in range(1500,2000):
+ sql += "double_%d double, " % (i + 1)
+ for i in range(2000,2500):
+ sql += "float_%d float, " % (i + 1)
+ for i in range(2500,3000):
+ sql += "bool_%d bool, " % (i + 1)
+ for i in range(3000,3500):
+ sql += "bigint_%d bigint, " % (i + 1)
+ for i in range(3500,3800):
+ sql += "nchar_%d nchar(4), " % (i + 1)
+ for i in range(3800,4090):
+ sql += "binary_%d binary(10), " % (i + 1)
+ sql += "col4091 binary(22))"
+ sql += " tags (loc nchar(10),tag_1 int,tag_2 int,tag_3 int) "
+ tdLog.info(len(sql))
+ tdSql.execute(sql)
+ sql = '''create table table_30 using stable_3
+ tags('table_30' , '1' , '2' , '3' );'''
+ tdSql.execute(sql)
+
+ for i in range(self.num):
+ sql = "insert into table_30 values(%d, "
+ for j in range(500):
+ str = "'%s', " % random.randint(-2147483647,2147483647)
+ sql += str
+ for j in range(500,1000):
+ str = "'%s', " % random.randint(-32767,32767 )
+ sql += str
+ for j in range(1000,1500):
+ str = "'%s', " % random.randint(-127,127)
+ sql += str
+ for j in range(1500,2000):
+ str = "'%s', " % random.randint(-922337203685477580700,922337203685477580700)
+ sql += str
+ for j in range(2000,2500):
+ str = "'%s', " % random.randint(-92233720368547758070,92233720368547758070)
+ sql += str
+ for j in range(2500,3000):
+ str = "'%s', " % random.choice(['true','false'])
+ sql += str
+ for j in range(3000,3500):
+ str = "'%s', " % random.randint(-9223372036854775807,9223372036854775807)
+ sql += str
+ for j in range(3500,3800):
+ str = "'%s', " % self.get_random_string(4)
+ sql += str
+ for j in range(3800,4090):
+ str = "'%s', " % self.get_random_string(10)
+ sql += str
+ sql += "'%s')" % self.get_random_string(22)
+ tdSql.execute(sql % (self.ts + i))
+ time.sleep(1)
+ tdSql.query("select count(*) from table_30")
+ tdSql.checkData(0, 0, self.num)
+ tdSql.query("select * from table_30")
+ tdSql.checkRows(self.num)
+ tdSql.checkCols(4092)
+
+ #insert null value
+ tdLog.info('test insert null value')
+ sql = '''create table table_31 using stable_3
+ tags('table_31' , '1' , '2' , '3' );'''
+ tdSql.execute(sql)
+
+ for i in range(self.num):
+ sql = "insert into table_31 values(%d, "
+ for j in range(2500):
+ str = "'%s', " % random.choice(['NULL' ,'NULL' ,'NULL' ,1 , 10 ,100 ,-100 ,-10, 88 ,66 ,'NULL' ,'NULL' ,'NULL' ])
+ sql += str
+ for j in range(2500,3000):
+ str = "'%s', " % random.choice(['true' ,'false'])
+ sql += str
+ for j in range(3000,3500):
+ str = "'%s', " % random.randint(-9223372036854775807,9223372036854775807)
+ sql += str
+ for j in range(3500,3800):
+ str = "'%s', " % self.get_random_string(4)
+ sql += str
+ for j in range(3800,4090):
+ str = "'%s', " % self.get_random_string(10)
+ sql += str
+ sql += "'%s')" % self.get_random_string(22)
+ tdSql.execute(sql % (self.ts + i))
+ time.sleep(1)
+ tdSql.query("select count(*) from table_31")
+ tdSql.checkData(0, 0, self.num)
+ tdSql.query("select * from table_31")
+ tdSql.checkRows(self.num)
+ tdSql.checkCols(4092)
+
+ #insert in order
+ tdLog.info('test insert in order')
+ for i in range(self.num):
+ sql = "insert into table_31 (ts,int_2,int_22,int_169,smallint_537,smallint_607,tinyint_1030,tinyint_1491,double_1629,double_1808,float_2075,col4091) values(%d, "
+ for j in range(10):
+ str = "'%s', " % random.randint(0,100)
+ sql += str
+ sql += "'%s')" % self.get_random_string(22)
+ tdSql.execute(sql % (self.ts + i + 1000))
+ time.sleep(1)
+ tdSql.query("select count(*) from table_31")
+ tdSql.checkData(0, 0, 2*self.num)
+ tdSql.query("select * from table_31")
+ tdSql.checkRows(2*self.num)
+ tdSql.checkCols(4092)
+
+ #insert out of order
+ tdLog.info('test insert out of order')
+ for i in range(self.num):
+ sql = "insert into table_31 (ts,int_169,float_2075,int_369,tinyint_1491,tinyint_1030,float_2360,smallint_537,double_1808,double_1608,double_1629,col4091) values(%d, "
+ for j in range(10):
+ str = "'%s', " % random.randint(0,100)
+ sql += str
+ sql += "'%s')" % self.get_random_string(22)
+ tdSql.execute(sql % (self.ts + i + 2000))
+ time.sleep(1)
+ tdSql.query("select count(*) from table_31")
+ tdSql.checkData(0, 0, 3*self.num)
+ tdSql.query("select * from table_31")
+ tdSql.checkRows(3*self.num)
+ tdSql.checkCols(4092)
+
+        # define TSDB_MAX_BYTES_PER_ROW 49151, TSDB_MAX_TAGS_LEN 16384
+        # bytes per column: ts 8, int 4, smallint 2, bigint 8, bool 1, float 4, tinyint 1,
+        # nchar 4*len+2 (offset), binary len+2 (offset)
+ tdLog.info('test super table max bytes per row 49151')
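+        # row width of the stable_4 columns:
+        #   8 (ts) + 500*4 + 500*2 + 500*1 + 500*8 + 500*4 + 500*1 + 500*8
+        #   + 300*(20*4+2) + 290*(34+2) + (101+2) = 49151 bytes; tag bytes count against
+        #   TSDB_MAX_TAGS_LEN separately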
+ sql = "create table stable_4(ts timestamp, "
+ for i in range(500):
+ sql += "int_%d int, " % (i + 1)
+ for i in range(500,1000):
+ sql += "smallint_%d smallint, " % (i + 1)
+ for i in range(1000,1500):
+ sql += "tinyint_%d tinyint, " % (i + 1)
+ for i in range(1500,2000):
+ sql += "double_%d double, " % (i + 1)
+ for i in range(2000,2500):
+ sql += "float_%d float, " % (i + 1)
+ for i in range(2500,3000):
+ sql += "bool_%d bool, " % (i + 1)
+ for i in range(3000,3500):
+ sql += "bigint_%d bigint, " % (i + 1)
+ for i in range(3500,3800):
+ sql += "nchar_%d nchar(20), " % (i + 1)
+ for i in range(3800,4090):
+ sql += "binary_%d binary(34), " % (i + 1)
+ sql += "col4091 binary(101))"
+ sql += " tags (loc nchar(10),tag_1 int,tag_2 int,tag_3 int) "
+ tdSql.execute(sql)
+ sql = '''create table table_40 using stable_4
+ tags('table_40' , '1' , '2' , '3' );'''
+ tdSql.execute(sql)
+ tdSql.query("select * from table_40")
+ tdSql.checkCols(4092)
+ tdSql.query("describe table_40")
+ tdSql.checkRows(4096)
+
+ tdLog.info('test super table drop and add column or tag')
+ sql = "alter stable stable_4 drop column col4091; "
+ tdSql.execute(sql)
+ sql = "select * from stable_4; "
+ tdSql.query(sql)
+ tdSql.checkCols(4095)
+ sql = "alter table stable_4 add column col4091 binary(102); "
+ tdSql.error(sql)
+ sql = "alter table stable_4 add column col4091 binary(101); "
+ tdSql.execute(sql)
+ sql = "select * from stable_4; "
+ tdSql.query(sql)
+ tdSql.checkCols(4096)
+
+ sql = "alter stable stable_4 drop tag tag_1; "
+ tdSql.execute(sql)
+ sql = "select * from stable_4; "
+ tdSql.query(sql)
+ tdSql.checkCols(4095)
+ sql = "alter table stable_4 add tag tag_1 int; "
+ tdSql.execute(sql)
+ sql = "select * from stable_4; "
+ tdSql.query(sql)
+ tdSql.checkCols(4096)
+ sql = "alter table stable_4 add tag loc1 nchar(10); "
+ tdSql.error(sql)
+
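+        # stable_5 is identical except col4091 binary(102), which makes the row 49152 bytes and must fail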
+        tdLog.info('test super table max bytes per row out 49151')
+ sql = "create table stable_5(ts timestamp, "
+ for i in range(500):
+ sql += "int_%d int, " % (i + 1)
+ for i in range(500,1000):
+ sql += "smallint_%d smallint, " % (i + 1)
+ for i in range(1000,1500):
+ sql += "tinyint_%d tinyint, " % (i + 1)
+ for i in range(1500,2000):
+ sql += "double_%d double, " % (i + 1)
+ for i in range(2000,2500):
+ sql += "float_%d float, " % (i + 1)
+ for i in range(2500,3000):
+ sql += "bool_%d bool, " % (i + 1)
+ for i in range(3000,3500):
+ sql += "bigint_%d bigint, " % (i + 1)
+ for i in range(3500,3800):
+ sql += "nchar_%d nchar(20), " % (i + 1)
+ for i in range(3800,4090):
+ sql += "binary_%d binary(34), " % (i + 1)
+ sql += "col4091 binary(102))"
+ sql += " tags (loc nchar(10),tag_1 int,tag_2 int,tag_3 int) "
+ tdSql.error(sql)
+
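+        # step6: columns + tags together may not exceed 4096, at create time or via alter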
+ print("==============step6, super table error col ==============")
+        tdLog.info('test exceeds column num')
+ # column + tag > 4096
+ sql = "create stable stable_2(ts timestamp, "
+ for i in range(4091):
+ sql += "col%d int, " % (i + 1)
+ sql += "col4092 binary(22))"
+ sql += " tags (loc nchar(10),tag_1 int,tag_2 int,tag_3 int) "
+ tdLog.info(len(sql))
+ tdSql.error(sql)
+
+ # column + tag > 4096
+ sql = "create stable stable_2(ts timestamp, "
+ for i in range(4090):
+ sql += "col%d int, " % (i + 1)
+ sql += "col4091 binary(22))"
+ sql += " tags (loc nchar(10),tag_1 int,tag_2 int,tag_3 int,tag_4 int) "
+ tdLog.info(len(sql))
+ tdSql.error(sql)
+
+ # alter column + tag > 4096
+ sql = "alter table stable_1 add column max int; "
+ tdSql.error(sql)
+ # TD-5322
+ sql = "alter table stable_1 add tag max int; "
+ tdSql.error(sql)
+ # TD-5324
+ sql = "alter table stable_4 modify column col4091 binary(102); "
+ tdSql.error(sql)
+ sql = "alter table stable_4 modify tag loc nchar(20); "
+ tdSql.query("select * from table_40")
+ tdSql.checkCols(4092)
+ tdSql.query("describe table_40")
+ tdSql.checkRows(4096)
+
+ os.system("rm -rf tools/taosdemoAllTest/TD-5213/insert4096columns_not_use_taosdemo.py.sql")
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.csv b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.csv
index 078c3b93dfb91e1e691da3c3da4d3df27d0eedb7..5b30be5b4c4d5c323141097af6207ffb8bb93449 100755
--- a/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.csv
+++ b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.csv
@@ -1,10 +1,3 @@
-0,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
-1,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
-2,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
-3,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
-4,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
-5,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
-6,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
-7,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
-8,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
-9,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
\ No newline at end of file
+1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 460, 461, 462, 463, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 474, 475, 476, 477, 478, 479, 480, 481, 482, 483, 484, 485, 486, 487, 488, 489, 490, 491, 492, 493, 494, 495, 496, 497, 498, 499, 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511, 512, 513, 514, 515, 516, 517, 518, 519, 520, 521, 522, 523, 524, 525, 526, 527, 528, 529, 530, 531, 532, 533, 534, 535, 536, 537, 538, 539, 540, 541, 542, 543, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553, 554, 555, 556, 557, 558, 559, 560, 561, 562, 563, 564, 565, 566, 567, 568, 569, 570, 571, 572, 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, 583, 584, 585, 586, 587, 588, 589, 590, 591, 592, 593, 594, 595, 596, 597, 598, 599, 600, 601, 602, 603, 604, 605, 606, 607, 608, 609, 610, 611, 612, 613, 614, 615, 616, 617, 618, 619, 620, 621, 622, 623, 624, 625, 626, 627, 628, 629, 630, 631, 632, 633, 634, 635, 636, 637, 638, 639, 640, 641, 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, 658, 659, 660, 661, 662, 663, 664, 665, 666, 667, 668, 669, 670, 671, 672, 673, 674, 675, 676, 677, 678, 679, 680, 681, 682, 683, 684, 685, 686, 687, 688, 689, 690, 691, 692, 693, 694, 695, 696, 697, 698, 699, 700, 701, 702, 703, 704, 705, 706, 707, 708, 709, 710, 711, 712, 713, 714, 715, 716, 717, 718, 719, 720, 721, 722, 723, 724, 725, 726, 727, 728, 729, 730, 731, 732, 
733, 734, 735, 736, 737, 738, 739, 740, 741, 742, 743, 744, 745, 746, 747, 748, 749, 750, 751, 752, 753, 754, 755, 756, 757, 758, 759, 760, 761, 762, 763, 764, 765, 766, 767, 768, 769, 770, 771, 772, 773, 774, 775, 776, 777, 778, 779, 780, 781, 782, 783, 784, 785, 786, 787, 788, 789, 790, 791, 792, 793, 794, 795, 796, 797, 798, 799, 800, 801, 802, 803, 804, 805, 806, 807, 808, 809, 810, 811, 812, 813, 814, 815, 816, 817, 818, 819, 820, 821, 822, 823, 824, 825, 826, 827, 828, 829, 830, 831, 832, 833, 834, 835, 836, 837, 838, 839, 840, 841, 842, 843, 844, 845, 846, 847, 848, 849, 850, 851, 852, 853, 854, 855, 856, 857, 858, 859, 860, 861, 862, 863, 864, 865, 866, 867, 868, 869, 870, 871, 872, 873, 874, 875, 876, 877, 878, 879, 880, 881, 882, 883, 884, 885, 886, 887, 888, 889, 890, 891, 892, 893, 894, 895, 896, 897, 898, 899, 900, 901, 902, 903, 904, 905, 906, 907, 908, 909, 910, 911, 912, 913, 914, 915, 916, 917, 918, 919, 920, 921, 922, 923, 924, 925, 926, 927, 928, 929, 930, 931, 932, 933, 934, 935, 936, 937, 938, 939, 940, 941, 942, 943, 944, 945, 946, 947, 948, 949, 950, 951, 952, 953, 954, 955, 956, 957, 958, 959, 960, 961, 962, 963, 964, 965, 966, 967, 968, 969, 970, 971, 972, 973, 974, 975, 976, 977, 978, 979, 980, 981, 982, 983, 984, 985, 986, 987, 988, 989, 990, 991, 992, 993, 994, 995, 996, 997, 998, 999, 1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015, 1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023, 1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031, 1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039, 1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055, 1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063, 1064, 1065, 1066, 1067, 1068, 1069, 1070, 1071, 1072, 1073, 1074, 1075, 1076, 1077, 1078, 1079, 1080, 1081, 1082, 1083, 1084, 1085, 1086, 1087, 1088, 1089, 1090, 1091, 1092, 1093, 1094, 1095, 1096, 1097, 1098, 1099, 1100, 1101, 1102, 1103, 1104, 1105, 1106, 1107, 1108, 1109, 1110, 1111, 1112, 1113, 1114, 1115, 1116, 1117, 1118, 1119, 1120, 1121, 1122, 1123, 1124, 1125, 1126, 1127, 1128, 1129, 1130, 1131, 1132, 1133, 1134, 1135, 1136, 1137, 1138, 1139, 1140, 1141, 1142, 1143, 1144, 1145, 1146, 1147, 1148, 1149, 1150, 1151, 1152, 1153, 1154, 1155, 1156, 1157, 1158, 1159, 1160, 1161, 1162, 1163, 1164, 1165, 1166, 1167, 1168, 1169, 1170, 1171, 1172, 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1180, 1181, 1182, 1183, 1184, 1185, 1186, 1187, 1188, 1189, 1190, 1191, 1192, 1193, 1194, 1195, 1196, 1197, 1198, 1199, 1200, 1201, 1202, 1203, 1204, 1205, 1206, 1207, 1208, 1209, 1210, 1211, 1212, 1213, 1214, 1215, 1216, 1217, 1218, 1219, 1220, 1221, 1222, 1223, 1224, 1225, 1226, 1227, 1228, 1229, 1230, 1231, 1232, 1233, 1234, 1235, 1236, 1237, 1238, 1239, 1240, 1241, 1242, 1243, 1244, 1245, 1246, 1247, 1248, 1249, 1250, 1251, 1252, 1253, 1254, 1255, 1256, 1257, 1258, 1259, 1260, 1261, 1262, 1263, 1264, 1265, 1266, 1267, 1268, 1269, 1270, 1271, 1272, 1273, 1274, 1275, 1276, 1277, 1278, 1279, 1280, 1281, 1282, 1283, 1284, 1285, 1286, 1287, 1288, 1289, 1290, 1291, 1292, 1293, 1294, 1295, 1296, 1297, 1298, 1299, 1300, 1301, 1302, 1303, 1304, 1305, 1306, 1307, 1308, 1309, 1310, 1311, 1312, 1313, 1314, 1315, 1316, 1317, 1318, 1319, 1320, 1321, 1322, 1323, 1324, 1325, 1326, 1327, 1328, 1329, 1330, 1331, 1332, 1333, 1334, 1335, 1336, 1337, 1338, 1339, 1340, 1341, 1342, 1343, 1344, 1345, 1346, 1347, 1348, 1349, 1350, 1351, 1352, 1353, 1354, 1355, 1356, 1357, 1358, 1359, 1360, 1361, 1362, 1363, 1364, 1365, 1366, 1367, 1368, 1369, 
1370, 1371, 1372, 1373, 1374, 1375, 1376, 1377, 1378, 1379, 1380, 1381, 1382, 1383, 1384, 1385, 1386, 1387, 1388, 1389, 1390, 1391, 1392, 1393, 1394, 1395, 1396, 1397, 1398, 1399, 1400, 1401, 1402, 1403, 1404, 1405, 1406, 1407, 1408, 1409, 1410, 1411, 1412, 1413, 1414, 1415, 1416, 1417, 1418, 1419, 1420, 1421, 1422, 1423, 1424, 1425, 1426, 1427, 1428, 1429, 1430, 1431, 1432, 1433, 1434, 1435, 1436, 1437, 1438, 1439, 1440, 1441, 1442, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1450, 1451, 1452, 1453, 1454, 1455, 1456, 1457, 1458, 1459, 1460, 1461, 1462, 1463, 1464, 1465, 1466, 1467, 1468, 1469, 1470, 1471, 1472, 1473, 1474, 1475, 1476, 1477, 1478, 1479, 1480, 1481, 1482, 1483, 1484, 1485, 1486, 1487, 1488, 1489, 1490, 1491, 1492, 1493, 1494, 1495, 1496, 1497, 1498, 1499, 1500, 1501, 1502, 1503, 1504, 1505, 1506, 1507, 1508, 1509, 1510, 1511, 1512, 1513, 1514, 1515, 1516, 1517, 1518, 1519, 1520, 1521, 1522, 1523, 1524, 1525, 1526, 1527, 1528, 1529, 1530, 1531, 1532, 1533, 1534, 1535, 1536, 1537, 1538, 1539, 1540, 1541, 1542, 1543, 1544, 1545, 1546, 1547, 1548, 1549, 1550, 1551, 1552, 1553, 1554, 1555, 1556, 1557, 1558, 1559, 1560, 1561, 1562, 1563, 1564, 1565, 1566, 1567, 1568, 1569, 1570, 1571, 1572, 1573, 1574, 1575, 1576, 1577, 1578, 1579, 1580, 1581, 1582, 1583, 1584, 1585, 1586, 1587, 1588, 1589, 1590, 1591, 1592, 1593, 1594, 1595, 1596, 1597, 1598, 1599, 1600, 1601, 1602, 1603, 1604, 1605, 1606, 1607, 1608, 1609, 1610, 1611, 1612, 1613, 1614, 1615, 1616, 1617, 1618, 1619, 1620, 1621, 1622, 1623, 1624, 1625, 1626, 1627, 1628, 1629, 1630, 1631, 1632, 1633, 1634, 1635, 1636, 1637, 1638, 1639, 1640, 1641, 1642, 1643, 1644, 1645, 1646, 1647, 1648, 1649, 1650, 1651, 1652, 1653, 1654, 1655, 1656, 1657, 1658, 1659, 1660, 1661, 1662, 1663, 1664, 1665, 1666, 1667, 1668, 1669, 1670, 1671, 1672, 1673, 1674, 1675, 1676, 1677, 1678, 1679, 1680, 1681, 1682, 1683, 1684, 1685, 1686, 1687, 1688, 1689, 1690, 1691, 1692, 1693, 1694, 1695, 1696, 1697, 1698, 1699, 1700, 1701, 1702, 1703, 1704, 1705, 1706, 1707, 1708, 1709, 1710, 1711, 1712, 1713, 1714, 1715, 1716, 1717, 1718, 1719, 1720, 1721, 1722, 1723, 1724, 1725, 1726, 1727, 1728, 1729, 1730, 1731, 1732, 1733, 1734, 1735, 1736, 1737, 1738, 1739, 1740, 1741, 1742, 1743, 1744, 1745, 1746, 1747, 1748, 1749, 1750, 1751, 1752, 1753, 1754, 1755, 1756, 1757, 1758, 1759, 1760, 1761, 1762, 1763, 1764, 1765, 1766, 1767, 1768, 1769, 1770, 1771, 1772, 1773, 1774, 1775, 1776, 1777, 1778, 1779, 1780, 1781, 1782, 1783, 1784, 1785, 1786, 1787, 1788, 1789, 1790, 1791, 1792, 1793, 1794, 1795, 1796, 1797, 1798, 1799, 1800, 1801, 1802, 1803, 1804, 1805, 1806, 1807, 1808, 1809, 1810, 1811, 1812, 1813, 1814, 1815, 1816, 1817, 1818, 1819, 1820, 1821, 1822, 1823, 1824, 1825, 1826, 1827, 1828, 1829, 1830, 1831, 1832, 1833, 1834, 1835, 1836, 1837, 1838, 1839, 1840, 1841, 1842, 1843, 1844, 1845, 1846, 1847, 1848, 1849, 1850, 1851, 1852, 1853, 1854, 1855, 1856, 1857, 1858, 1859, 1860, 1861, 1862, 1863, 1864, 1865, 1866, 1867, 1868, 1869, 1870, 1871, 1872, 1873, 1874, 1875, 1876, 1877, 1878, 1879, 1880, 1881, 1882, 1883, 1884, 1885, 1886, 1887, 1888, 1889, 1890, 1891, 1892, 1893, 1894, 1895, 1896, 1897, 1898, 1899, 1900, 1901, 1902, 1903, 1904, 1905, 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913, 1914, 1915, 1916, 1917, 1918, 1919, 1920, 1921, 1922, 1923, 1924, 1925, 1926, 1927, 1928, 1929, 1930, 1931, 1932, 1933, 1934, 1935, 1936, 1937, 1938, 1939, 1940, 1941, 1942, 1943, 1944, 1945, 1946, 1947, 1948, 1949, 1950, 1951, 1952, 1953, 1954, 1955, 1956, 1957, 1958, 1959, 1960, 1961, 
1962, 1963, 1964, 1965, 1966, 1967, 1968, 1969, 1970, 1971, 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979, 1980, 1981, 1982, 1983, 1984, 1985, 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022, 2023, 2024, 2025, 2026, 2027, 2028, 2029, 2030, 2031, 2032, 2033, 2034, 2035, 2036, 2037, 2038, 2039, 2040, 2041, 2042, 2043, 2044, 2045, 2046, 2047, 2048, 2049, 2050, 2051, 2052, 2053, 2054, 2055, 2056, 2057, 2058, 2059, 2060, 2061, 2062, 2063, 2064, 2065, 2066, 2067, 2068, 2069, 2070, 2071, 2072, 2073, 2074, 2075, 2076, 2077, 2078, 2079, 2080, 2081, 2082, 2083, 2084, 2085, 2086, 2087, 2088, 2089, 2090, 2091, 2092, 2093, 2094, 2095, 2096, 2097, 2098, 2099, 2100, 2101, 2102, 2103, 2104, 2105, 2106, 2107, 2108, 2109, 2110, 2111, 2112, 2113, 2114, 2115, 2116, 2117, 2118, 2119, 2120, 2121, 2122, 2123, 2124, 2125, 2126, 2127, 2128, 2129, 2130, 2131, 2132, 2133, 2134, 2135, 2136, 2137, 2138, 2139, 2140, 2141, 2142, 2143, 2144, 2145, 2146, 2147, 2148, 2149, 2150, 2151, 2152, 2153, 2154, 2155, 2156, 2157, 2158, 2159, 2160, 2161, 2162, 2163, 2164, 2165, 2166, 2167, 2168, 2169, 2170, 2171, 2172, 2173, 2174, 2175, 2176, 2177, 2178, 2179, 2180, 2181, 2182, 2183, 2184, 2185, 2186, 2187, 2188, 2189, 2190, 2191, 2192, 2193, 2194, 2195, 2196, 2197, 2198, 2199, 2200, 2201, 2202, 2203, 2204, 2205, 2206, 2207, 2208, 2209, 2210, 2211, 2212, 2213, 2214, 2215, 2216, 2217, 2218, 2219, 2220, 2221, 2222, 2223, 2224, 2225, 2226, 2227, 2228, 2229, 2230, 2231, 2232, 2233, 2234, 2235, 2236, 2237, 2238, 2239, 2240, 2241, 2242, 2243, 2244, 2245, 2246, 2247, 2248, 2249, 2250, 2251, 2252, 2253, 2254, 2255, 2256, 2257, 2258, 2259, 2260, 2261, 2262, 2263, 2264, 2265, 2266, 2267, 2268, 2269, 2270, 2271, 2272, 2273, 2274, 2275, 2276, 2277, 2278, 2279, 2280, 2281, 2282, 2283, 2284, 2285, 2286, 2287, 2288, 2289, 2290, 2291, 2292, 2293, 2294, 2295, 2296, 2297, 2298, 2299, 2300, 2301, 2302, 2303, 2304, 2305, 2306, 2307, 2308, 2309, 2310, 2311, 2312, 2313, 2314, 2315, 2316, 2317, 2318, 2319, 2320, 2321, 2322, 2323, 2324, 2325, 2326, 2327, 2328, 2329, 2330, 2331, 2332, 2333, 2334, 2335, 2336, 2337, 2338, 2339, 2340, 2341, 2342, 2343, 2344, 2345, 2346, 2347, 2348, 2349, 2350, 2351, 2352, 2353, 2354, 2355, 2356, 2357, 2358, 2359, 2360, 2361, 2362, 2363, 2364, 2365, 2366, 2367, 2368, 2369, 2370, 2371, 2372, 2373, 2374, 2375, 2376, 2377, 2378, 2379, 2380, 2381, 2382, 2383, 2384, 2385, 2386, 2387, 2388, 2389, 2390, 2391, 2392, 2393, 2394, 2395, 2396, 2397, 2398, 2399, 2400, 2401, 2402, 2403, 2404, 2405, 2406, 2407, 2408, 2409, 2410, 2411, 2412, 2413, 2414, 2415, 2416, 2417, 2418, 2419, 2420, 2421, 2422, 2423, 2424, 2425, 2426, 2427, 2428, 2429, 2430, 2431, 2432, 2433, 2434, 2435, 2436, 2437, 2438, 2439, 2440, 2441, 2442, 2443, 2444, 2445, 2446, 2447, 2448, 2449, 2450, 2451, 2452, 2453, 2454, 2455, 2456, 2457, 2458, 2459, 2460, 2461, 2462, 2463, 2464, 2465, 2466, 2467, 2468, 2469, 2470, 2471, 2472, 2473, 2474, 2475, 2476, 2477, 2478, 2479, 2480, 2481, 2482, 2483, 2484, 2485, 2486, 2487, 2488, 2489, 2490, 2491, 2492, 2493, 2494, 2495, 2496, 2497, 2498, 2499, 2500, 2501, 2502, 2503, 2504, 2505, 2506, 2507, 2508, 2509, 2510, 2511, 2512, 2513, 2514, 2515, 2516, 2517, 2518, 2519, 2520, 2521, 2522, 2523, 2524, 2525, 2526, 2527, 2528, 2529, 2530, 2531, 2532, 2533, 2534, 2535, 2536, 2537, 2538, 2539, 2540, 2541, 2542, 2543, 2544, 2545, 2546, 2547, 2548, 2549, 2550, 2551, 2552, 2553, 
2554, 2555, 2556, 2557, 2558, 2559, 2560, 2561, 2562, 2563, 2564, 2565, 2566, 2567, 2568, 2569, 2570, 2571, 2572, 2573, 2574, 2575, 2576, 2577, 2578, 2579, 2580, 2581, 2582, 2583, 2584, 2585, 2586, 2587, 2588, 2589, 2590, 2591, 2592, 2593, 2594, 2595, 2596, 2597, 2598, 2599, 2600, 2601, 2602, 2603, 2604, 2605, 2606, 2607, 2608, 2609, 2610, 2611, 2612, 2613, 2614, 2615, 2616, 2617, 2618, 2619, 2620, 2621, 2622, 2623, 2624, 2625, 2626, 2627, 2628, 2629, 2630, 2631, 2632, 2633, 2634, 2635, 2636, 2637, 2638, 2639, 2640, 2641, 2642, 2643, 2644, 2645, 2646, 2647, 2648, 2649, 2650, 2651, 2652, 2653, 2654, 2655, 2656, 2657, 2658, 2659, 2660, 2661, 2662, 2663, 2664, 2665, 2666, 2667, 2668, 2669, 2670, 2671, 2672, 2673, 2674, 2675, 2676, 2677, 2678, 2679, 2680, 2681, 2682, 2683, 2684, 2685, 2686, 2687, 2688, 2689, 2690, 2691, 2692, 2693, 2694, 2695, 2696, 2697, 2698, 2699, 2700, 2701, 2702, 2703, 2704, 2705, 2706, 2707, 2708, 2709, 2710, 2711, 2712, 2713, 2714, 2715, 2716, 2717, 2718, 2719, 2720, 2721, 2722, 2723, 2724, 2725, 2726, 2727, 2728, 2729, 2730, 2731, 2732, 2733, 2734, 2735, 2736, 2737, 2738, 2739, 2740, 2741, 2742, 2743, 2744, 2745, 2746, 2747, 2748, 2749, 2750, 2751, 2752, 2753, 2754, 2755, 2756, 2757, 2758, 2759, 2760, 2761, 2762, 2763, 2764, 2765, 2766, 2767, 2768, 2769, 2770, 2771, 2772, 2773, 2774, 2775, 2776, 2777, 2778, 2779, 2780, 2781, 2782, 2783, 2784, 2785, 2786, 2787, 2788, 2789, 2790, 2791, 2792, 2793, 2794, 2795, 2796, 2797, 2798, 2799, 2800, 2801, 2802, 2803, 2804, 2805, 2806, 2807, 2808, 2809, 2810, 2811, 2812, 2813, 2814, 2815, 2816, 2817, 2818, 2819, 2820, 2821, 2822, 2823, 2824, 2825, 2826, 2827, 2828, 2829, 2830, 2831, 2832, 2833, 2834, 2835, 2836, 2837, 2838, 2839, 2840, 2841, 2842, 2843, 2844, 2845, 2846, 2847, 2848, 2849, 2850, 2851, 2852, 2853, 2854, 2855, 2856, 2857, 2858, 2859, 2860, 2861, 2862, 2863, 2864, 2865, 2866, 2867, 2868, 2869, 2870, 2871, 2872, 2873, 2874, 2875, 2876, 2877, 2878, 2879, 2880, 2881, 2882, 2883, 2884, 2885, 2886, 2887, 2888, 2889, 2890, 2891, 2892, 2893, 2894, 2895, 2896, 2897, 2898, 2899, 2900, 2901, 2902, 2903, 2904, 2905, 2906, 2907, 2908, 2909, 2910, 2911, 2912, 2913, 2914, 2915, 2916, 2917, 2918, 2919, 2920, 2921, 2922, 2923, 2924, 2925, 2926, 2927, 2928, 2929, 2930, 2931, 2932, 2933, 2934, 2935, 2936, 2937, 2938, 2939, 2940, 2941, 2942, 2943, 2944, 2945, 2946, 2947, 2948, 2949, 2950, 2951, 2952, 2953, 2954, 2955, 2956, 2957, 2958, 2959, 2960, 2961, 2962, 2963, 2964, 2965, 2966, 2967, 2968, 2969, 2970, 2971, 2972, 2973, 2974, 2975, 2976, 2977, 2978, 2979, 2980, 2981, 2982, 2983, 2984, 2985, 2986, 2987, 2988, 2989, 2990, 2991, 2992, 2993, 2994, 2995, 2996, 2997, 2998, 2999, 3000, 3001, 3002, 3003, 3004, 3005, 3006, 3007, 3008, 3009, 3010, 3011, 3012, 3013, 3014, 3015, 3016, 3017, 3018, 3019, 3020, 3021, 3022, 3023, 3024, 3025, 3026, 3027, 3028, 3029, 3030, 3031, 3032, 3033, 3034, 3035, 3036, 3037, 3038, 3039, 3040, 3041, 3042, 3043, 3044, 3045, 3046, 3047, 3048, 3049, 3050, 3051, 3052, 3053, 3054, 3055, 3056, 3057, 3058, 3059, 3060, 3061, 3062, 3063, 3064, 3065, 3066, 3067, 3068, 3069, 3070, 3071, 3072, 3073, 3074, 3075, 3076, 3077, 3078, 3079, 3080, 3081, 3082, 3083, 3084, 3085, 3086, 3087, 3088, 3089, 3090, 3091, 3092, 3093, 3094, 3095, 3096, 3097, 3098, 3099, 3100, 3101, 3102, 3103, 3104, 3105, 3106, 3107, 3108, 3109, 3110, 3111, 3112, 3113, 3114, 3115, 3116, 3117, 3118, 3119, 3120, 3121, 3122, 3123, 3124, 3125, 3126, 3127, 3128, 3129, 3130, 3131, 3132, 3133, 3134, 3135, 3136, 3137, 3138, 3139, 3140, 3141, 3142, 3143, 3144, 3145, 
3146, 3147, 3148, 3149, 3150, 3151, 3152, 3153, 3154, 3155, 3156, 3157, 3158, 3159, 3160, 3161, 3162, 3163, 3164, 3165, 3166, 3167, 3168, 3169, 3170, 3171, 3172, 3173, 3174, 3175, 3176, 3177, 3178, 3179, 3180, 3181, 3182, 3183, 3184, 3185, 3186, 3187, 3188, 3189, 3190, 3191, 3192, 3193, 3194, 3195, 3196, 3197, 3198, 3199, 3200, 3201, 3202, 3203, 3204, 3205, 3206, 3207, 3208, 3209, 3210, 3211, 3212, 3213, 3214, 3215, 3216, 3217, 3218, 3219, 3220, 3221, 3222, 3223, 3224, 3225, 3226, 3227, 3228, 3229, 3230, 3231, 3232, 3233, 3234, 3235, 3236, 3237, 3238, 3239, 3240, 3241, 3242, 3243, 3244, 3245, 3246, 3247, 3248, 3249, 3250, 3251, 3252, 3253, 3254, 3255, 3256, 3257, 3258, 3259, 3260, 3261, 3262, 3263, 3264, 3265, 3266, 3267, 3268, 3269, 3270, 3271, 3272, 3273, 3274, 3275, 3276, 3277, 3278, 3279, 3280, 3281, 3282, 3283, 3284, 3285, 3286, 3287, 3288, 3289, 3290, 3291, 3292, 3293, 3294, 3295, 3296, 3297, 3298, 3299, 3300, 3301, 3302, 3303, 3304, 3305, 3306, 3307, 3308, 3309, 3310, 3311, 3312, 3313, 3314, 3315, 3316, 3317, 3318, 3319, 3320, 3321, 3322, 3323, 3324, 3325, 3326, 3327, 3328, 3329, 3330, 3331, 3332, 3333, 3334, 3335, 3336, 3337, 3338, 3339, 3340, 3341, 3342, 3343, 3344, 3345, 3346, 3347, 3348, 3349, 3350, 3351, 3352, 3353, 3354, 3355, 3356, 3357, 3358, 3359, 3360, 3361, 3362, 3363, 3364, 3365, 3366, 3367, 3368, 3369, 3370, 3371, 3372, 3373, 3374, 3375, 3376, 3377, 3378, 3379, 3380, 3381, 3382, 3383, 3384, 3385, 3386, 3387, 3388, 3389, 3390, 3391, 3392, 3393, 3394, 3395, 3396, 3397, 3398, 3399, 3400, 3401, 3402, 3403, 3404, 3405, 3406, 3407, 3408, 3409, 3410, 3411, 3412, 3413, 3414, 3415, 3416, 3417, 3418, 3419, 3420, 3421, 3422, 3423, 3424, 3425, 3426, 3427, 3428, 3429, 3430, 3431, 3432, 3433, 3434, 3435, 3436, 3437, 3438, 3439, 3440, 3441, 3442, 3443, 3444, 3445, 3446, 3447, 3448, 3449, 3450, 3451, 3452, 3453, 3454, 3455, 3456, 3457, 3458, 3459, 3460, 3461, 3462, 3463, 3464, 3465, 3466, 3467, 3468, 3469, 3470, 3471, 3472, 3473, 3474, 3475, 3476, 3477, 3478, 3479, 3480, 3481, 3482, 3483, 3484, 3485, 3486, 3487, 3488, 3489, 3490, 3491, 3492, 3493, 3494, 3495, 3496, 3497, 3498, 3499, 3500, 3501, 3502, 3503, 3504, 3505, 3506, 3507, 3508, 3509, 3510, 3511, 3512, 3513, 3514, 3515, 3516, 3517, 3518, 3519, 3520, 3521, 3522, 3523, 3524, 3525, 3526, 3527, 3528, 3529, 3530, 3531, 3532, 3533, 3534, 3535, 3536, 3537, 3538, 3539, 3540, 3541, 3542, 3543, 3544, 3545, 3546, 3547, 3548, 3549, 3550, 3551, 3552, 3553, 3554, 3555, 3556, 3557, 3558, 3559, 3560, 3561, 3562, 3563, 3564, 3565, 3566, 3567, 3568, 3569, 3570, 3571, 3572, 3573, 3574, 3575, 3576, 3577, 3578, 3579, 3580, 3581, 3582, 3583, 3584, 3585, 3586, 3587, 3588, 3589, 3590, 3591, 3592, 3593, 3594, 3595, 3596, 3597, 3598, 3599, 3600, 3601, 3602, 3603, 3604, 3605, 3606, 3607, 3608, 3609, 3610, 3611, 3612, 3613, 3614, 3615, 3616, 3617, 3618, 3619, 3620, 3621, 3622, 3623, 3624, 3625, 3626, 3627, 3628, 3629, 3630, 3631, 3632, 3633, 3634, 3635, 3636, 3637, 3638, 3639, 3640, 3641, 3642, 3643, 3644, 3645, 3646, 3647, 3648, 3649, 3650, 3651, 3652, 3653, 3654, 3655, 3656, 3657, 3658, 3659, 3660, 3661, 3662, 3663, 3664, 3665, 3666, 3667, 3668, 3669, 3670, 3671, 3672, 3673, 3674, 3675, 3676, 3677, 3678, 3679, 3680, 3681, 3682, 3683, 3684, 3685, 3686, 3687, 3688, 3689, 3690, 3691, 3692, 3693, 3694, 3695, 3696, 3697, 3698, 3699, 3700, 3701, 3702, 3703, 3704, 3705, 3706, 3707, 3708, 3709, 3710, 3711, 3712, 3713, 3714, 3715, 3716, 3717, 3718, 3719, 3720, 3721, 3722, 3723, 3724, 3725, 3726, 3727, 3728, 3729, 3730, 3731, 3732, 3733, 3734, 3735, 3736, 3737, 
3738, 3739, 3740, 3741, 3742, 3743, 3744, 3745, 3746, 3747, 3748, 3749, 3750, 3751, 3752, 3753, 3754, 3755, 3756, 3757, 3758, 3759, 3760, 3761, 3762, 3763, 3764, 3765, 3766, 3767, 3768, 3769, 3770, 3771, 3772, 3773, 3774, 3775, 3776, 3777, 3778, 3779, 3780, 3781, 3782, 3783, 3784, 3785, 3786, 3787, 3788, 3789, 3790, 3791, 3792, 3793, 3794, 3795, 3796, 3797, 3798, 3799, 3800, 3801, 3802, 3803, 3804, 3805, 3806, 3807, 3808, 3809, 3810, 3811, 3812, 3813, 3814, 3815, 3816, 3817, 3818, 3819, 3820, 3821, 3822, 3823, 3824, 3825, 3826, 3827, 3828, 3829, 3830, 3831, 3832, 3833, 3834, 3835, 3836, 3837, 3838, 3839, 3840, 3841, 3842, 3843, 3844, 3845, 3846, 3847, 3848, 3849, 3850, 3851, 3852, 3853, 3854, 3855, 3856, 3857, 3858, 3859, 3860, 3861, 3862, 3863, 3864, 3865, 3866, 3867, 3868, 3869, 3870, 3871, 3872, 3873, 3874, 3875, 3876, 3877, 3878, 3879, 3880, 3881, 3882, 3883, 3884, 3885, 3886, 3887, 3888, 3889, 3890, 3891, 3892, 3893, 3894, 3895, 3896, 3897, 3898, 3899, 3900, 3901, 3902, 3903, 3904, 3905, 3906, 3907, 3908, 3909, 3910, 3911, 3912, 3913, 3914, 3915, 3916, 3917, 3918, 3919, 3920, 3921, 3922, 3923, 3924, 3925, 3926, 3927, 3928, 3929, 3930, 3931, 3932, 3933, 3934, 3935, 3936, 3937, 3938, 3939, 3940, 3941, 3942, 3943, 3944, 3945, 3946, 3947, 3948, 3949, 3950, 3951, 3952, 3953, 3954, 3955, 3956, 3957, 3958, 3959, 3960, 3961, 3962, 3963, 3964, 3965, 3966, 3967, 3968, 3969, 3970, 3971, 3972, 3973, 3974, 3975, 3976, 3977, 3978, 3979, 3980, 3981, 3982, 3983, 3984, 3985, 3986, 3987, 3988, 3989, 3990, 3991, 3992, 3993, 3994, 3995, 3996, 3997, 3998, 3999, 4000, 4001, 4002, 4003, 4004, 4005, 4006, 4007, 4008, 4009, 4010, 4011, 4012, 4013, 4014, 4015, 4016, 4017, 4018, 4019, 4020, 4021, 4022, 4023, 4024, 4025, 4026, 4027, 4028, 4029, 4030, 4031, 4032, 4033, 4034, 4035, 4036, 4037, 4038, 4039, 4040, 4041, 4042, 4043, 4044, 4045, 4046, 4047, 4048, 4049, 4050, 4051, 4052, 4053, 4054, 4055, 4056, 4057, 4058, 4059, 4060, 4061, 4062, 4063, 4064, 4065, 4066, 4067, 4068, 4069, 4070, 4071, 4072, 4073, 4074, 4075, 4076, 4077, 4078, 4079, 4080, 4081, 4082, 4083, 4084, 4085, 4086, 4087, 4088, 4089, 4090, 4091
+1,2,3,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NUL
L,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NUL
L,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NUL
L,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NUL
L,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NUL
L,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL
+1,2,3,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NUL
L,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NUL
L,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NUL
L,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NUL
L,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NUL
L,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,4,NULL,NULL,NULL,NULL,NULL,NULL,5,NULL,NULL,6,NULL,NULL,NULL,7,NULL,NULL,NULL,8,NULL,NULL,NULL,9,NULL,NULL,10
\ No newline at end of file
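
Note: the sample file above is machine-generated CSV; each row carries 4,091 comma-separated fields, almost all NULL, which lines up with the 4,091 data columns of the stb_excel super table configured in the JSON change below. Rows of that width are easier to regenerate than to edit; the following is a minimal sketch only, not a byte-for-byte reproduction of the shipped file — the field count and the leading 1,2,3 come from the visible data, while the output file name and the placement of the remaining non-NULL values are assumptions.

#!/usr/bin/env python3
# Sketch: regenerate a wide sample CSV in the spirit of insertSigcolumnsNum4096.csv.
# Assumptions: 4091 fields per row, no header line; only the leading 1,2,3 of each
# row is reproduced, every other field is written as NULL.

NUM_FIELDS = 4091
ROWS = 2

def make_row():
    fields = ["NULL"] * NUM_FIELDS
    fields[:3] = ["1", "2", "3"]  # leading sample values, as in the shipped file
    return ",".join(fields)

with open("insertSigcolumnsNum4096.csv", "w") as f:
    f.write("\n".join(make_row() for _ in range(ROWS)))
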
diff --git a/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json
index e2b15b83a7742c76159617ddd7b7e08103e53537..25af3a1041dbcd06319dd6abfeb82fd33240c013 100755
--- a/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json
+++ b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json
@@ -15,7 +15,7 @@
"max_sql_len": 102400000,
"databases": [{
"dbinfo": {
- "name": "db",
+ "name": "json",
"drop": "yes",
"replica": 1,
"days": 10,
@@ -35,13 +35,13 @@
"super_tables": [{
"name": "stb_old",
"child_table_exists":"no",
- "childtable_count": 10,
+ "childtable_count": 1,
"childtable_prefix": "stb_old_",
"auto_create_table": "no",
"batch_create_tbl_num": 5,
- "data_source": "sample",
+ "data_source": "rand",
"insert_mode": "taosc",
- "insert_rows": 100,
+ "insert_rows": 10,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
@@ -55,18 +55,18 @@
"sample_format": "csv",
"sample_file": "./tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.csv",
"tags_file": "",
- "columns": [{"type": "INT","count":4000}, {"type": "BINARY", "len": 16, "count":1}],
+ "columns": [{"type": "INT","count":1000}, {"type": "BINARY", "len": 16, "count":20}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
},{
"name": "stb_new",
"child_table_exists":"no",
- "childtable_count": 10,
+ "childtable_count": 1,
"childtable_prefix": "stb_new_",
"auto_create_table": "no",
"batch_create_tbl_num": 5,
"data_source": "rand",
"insert_mode": "taosc",
- "insert_rows": 100,
+ "insert_rows": 10,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
@@ -80,18 +80,18 @@
"sample_format": "csv",
"sample_file": "./tools/taosdemoAllTest/sample.csv",
"tags_file": "",
- "columns": [{"type": "DOUBLE","count":1020}],
- "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
+ "columns": [{"type": "INT","count":4000}, {"type": "BINARY", "len": 16, "count":90}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":3}]
},{
- "name": "stb_int",
+ "name": "stb_mix",
"child_table_exists":"no",
- "childtable_count": 10,
- "childtable_prefix": "stb_int_",
+ "childtable_count": 1,
+ "childtable_prefix": "stb_mix_",
"auto_create_table": "no",
"batch_create_tbl_num": 5,
"data_source": "rand",
"insert_mode": "taosc",
- "insert_rows": 100,
+ "insert_rows": 10,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
@@ -105,8 +105,33 @@
"sample_format": "csv",
"sample_file": "./tools/taosdemoAllTest/sample.csv",
"tags_file": "",
- "columns": [{"type": "int","count":1020}],
- "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
+ "columns": [{"type": "INT","count":500},{"type": "SMALLINT","count":500},{"type": "TINYINT","count":500},{"type": "DOUBLE","count":500},{"type": "FLOAT","count":500},{"type": "BOOL","count":500},{"type": "BIGINT","count":500},{"type": "NCHAR","len": 20,"count":300},{"type": "BINARY","len": 34,"count":290},{"type": "BINARY","len": 101,"count":1}],
+ "tags": [{"type": "INT", "count":3}, {"type": "NCHAR", "len": 10, "count":1}]
+ },{
+ "name": "stb_excel",
+ "child_table_exists":"no",
+ "childtable_count": 1,
+ "childtable_prefix": "stb_excel_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 5,
+ "data_source": "sample",
+ "insert_mode": "taosc",
+ "insert_rows": 10,
+ "childtable_limit": 0,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT","count":500},{"type": "SMALLINT","count":500},{"type": "SMALLINT","count":500},{"type": "DOUBLE","count":500},{"type": "FLOAT","count":500},{"type": "BOOL","count":500},{"type": "BIGINT","count":500},{"type": "NCHAR","len": 19,"count":300},{"type": "BINARY","len": 34,"count":290},{"type": "BINARY","len": 101,"count":1}],
+ "tags": [{"type": "INT", "count":3}, {"type": "NCHAR", "len": 10, "count":1}]
}]
}]
}
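
Note: the reworked config defines four super tables (stb_old, stb_new, stb_mix, stb_excel), each with one child table and 10 rows, and sizes the three wide ones so that describe returns exactly 4096 entries (timestamp + data columns + tags), which is what the test script below checks for the new-format tables. The column arithmetic can be sanity-checked straight from the JSON before running taosdemo; the sketch below assumes the config path used by the test and the 4096-entry ceiling this case exercises.

import json

# Sketch: total the schema entries per super table from the taosdemo config.
# describe <stable> lists 1 timestamp + data columns + tags, so each total
# should stay at or under the 4096-entry ceiling exercised by this test case.
CONFIG = "tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json"

with open(CONFIG) as f:
    cfg = json.load(f)

for db in cfg["databases"]:
    for stb in db["super_tables"]:
        data_cols = sum(c.get("count", 1) for c in stb["columns"])
        tag_cols = sum(t.get("count", 1) for t in stb["tags"])
        total = 1 + data_cols + tag_cols  # ts + columns + tags
        print(f"{stb['name']}: {data_cols} columns, {tag_cols} tags, describe -> {total}")
        assert total <= 4096, f"{stb['name']} exceeds the 4096-entry ceiling"
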
diff --git a/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py
index 78bd0c7e6053c7caff5a7b44b425474e78a0733d..eb844b6fe24338b0301c45b918967faec7debcc0 100755
--- a/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py
+++ b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py
@@ -13,6 +13,7 @@
import sys
import os
+import time
from util.log import *
from util.cases import *
from util.sql import *
@@ -23,7 +24,7 @@ class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
-
+
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
@@ -39,7 +40,7 @@ class TDTestCase:
buildPath = root[:len(root)-len("/build/bin")]
break
return buildPath
-
+
def run(self):
buildPath = self.getBuildPath()
if (buildPath == ""):
@@ -48,86 +49,124 @@ class TDTestCase:
tdLog.info("taosd found in %s" % buildPath)
binPath = buildPath+ "/build/bin/"
- # insert: create one or mutiple tables per sql and insert multiple rows per sql
+ # -N: create regular (non-super) tables; -d: database name; -t: number of tables; -n: rows per table; -l: number of data columns (excluding the timestamp); -y: skip the confirmation prompt
+ # regular tables: old (1024-column) and new (4096-column) limits
+ startTime = time.time()
+ os.system("%staosdemo -N -d regular_old -t 1 -n 10 -l 1023 -y" % binPath)
+ tdSql.execute("use regular_old")
+ tdSql.query("show tables;")
+ tdSql.checkRows(1)
+ tdSql.query("select * from d0;")
+ tdSql.checkCols(1024)
+ tdSql.query("describe d0;")
+ tdSql.checkRows(1024)
+
+ os.system("%staosdemo -N -d regular_new -t 1 -n 10 -l 4095 -y" % binPath)
+ tdSql.execute("use regular_new")
+ tdSql.query("show tables;")
+ tdSql.checkRows(1)
+ tdSql.query("select * from d0;")
+ tdSql.checkCols(4096)
+ tdSql.query("describe d0;")
+ tdSql.checkRows(4096)
+
+ # super table: -d: database name; -t: number of child tables; -n: rows per table; -l: number of data columns (excluding the timestamp); -y: skip the confirmation prompt
+ os.system("%staosdemo -d super_old -t 1 -n 10 -l 1021 -y" % binPath)
+ tdSql.execute("use super_old")
+ tdSql.query("show tables;")
+ tdSql.checkRows(1)
+ tdSql.query("select * from meters;")
+ tdSql.checkCols(1024)
+ tdSql.query("select * from d0;")
+ tdSql.checkCols(1022)
+ tdSql.query("describe meters;")
+ tdSql.checkRows(1024)
+ tdSql.query("describe d0;")
+ tdSql.checkRows(1024)
+
+ os.system("%staosdemo -d super_new -t 1 -n 10 -l 4093 -y" % binPath)
+ tdSql.execute("use super_new")
+ tdSql.query("show tables;")
+ tdSql.checkRows(1)
+ tdSql.query("select * from meters;")
+ tdSql.checkCols(4096)
+ tdSql.query("select * from d0;")
+ tdSql.checkCols(4094)
+ tdSql.query("describe meters;")
+ tdSql.checkRows(4096)
+ tdSql.query("describe d0;")
+ tdSql.checkRows(4096)
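+ # a child table created by hand against the same super table inherits the 4093+ts column schema; describe also lists the 2 tags -> 4096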
+ tdSql.execute("create table stb_new1_1 using meters tags(1,2)")
+ tdSql.query("select * from stb_new1_1")
+ tdSql.checkCols(4094)
+ tdSql.query("describe stb_new1_1;")
+ tdSql.checkRows(4096)
+
+ # insert: create one or multiple tables per sql and insert multiple rows per sql
# test case for https://jira.taosdata.com:18080/browse/TD-5213
os.system("%staosdemo -f tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json -y " % binPath)
- tdSql.execute("use db")
+ tdSql.execute("use json")
tdSql.query("select count (tbname) from stb_old")
- tdSql.checkData(0, 0, 10)
-
- # tdSql.query("select * from stb_old")
- # tdSql.checkRows(10)
- # tdSql.checkCols(1024)
-
- # tdSql.query("select count (tbname) from stb_new")
- # tdSql.checkData(0, 0, 10)
-
- # tdSql.query("select * from stb_new")
- # tdSql.checkRows(10)
- # tdSql.checkCols(4096)
-
- # tdLog.info("stop dnode to commit data to disk")
- # tdDnodes.stop(1)
- # tdDnodes.start(1)
-
- #regular table
- sql = "create table tb(ts timestamp, "
- for i in range(1022):
- sql += "c%d binary(14), " % (i + 1)
- sql += "c1023 binary(22))"
- tdSql.execute(sql)
-
- for i in range(4):
- sql = "insert into tb values(%d, "
- for j in range(1022):
- str = "'%s', " % self.get_random_string(14)
- sql += str
- sql += "'%s')" % self.get_random_string(22)
- tdSql.execute(sql % (self.ts + i))
-
- time.sleep(10)
- tdSql.query("select count(*) from tb")
- tdSql.checkData(0, 0, 4)
-
- tdDnodes.stop(1)
- tdDnodes.start(1)
-
- time.sleep(1)
- tdSql.query("select count(*) from tb")
- tdSql.checkData(0, 0, 4)
-
-
- sql = "create table tb1(ts timestamp, "
- for i in range(4094):
- sql += "c%d binary(14), " % (i + 1)
- sql += "c4095 binary(22))"
- tdSql.execute(sql)
-
- for i in range(4):
- sql = "insert into tb1 values(%d, "
- for j in range(4094):
- str = "'%s', " % self.get_random_string(14)
- sql += str
- sql += "'%s')" % self.get_random_string(22)
- tdSql.execute(sql % (self.ts + i))
-
- time.sleep(10)
- tdSql.query("select count(*) from tb1")
- tdSql.checkData(0, 0, 4)
-
- tdDnodes.stop(1)
- tdDnodes.start(1)
-
- time.sleep(1)
- tdSql.query("select count(*) from tb1")
- tdSql.checkData(0, 0, 4)
-
-
-
- #os.system("rm -rf tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py.sql")
-
-
-
+ tdSql.checkData(0, 0, 1)
+
+ tdSql.query("select * from stb_old")
+ tdSql.checkRows(10)
+ tdSql.checkCols(1024)
+
+ tdSql.query("select count (tbname) from stb_new")
+ tdSql.checkData(0, 0, 1)
+
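+ # stb_new child tables have 4091 columns (incl. ts); select * / describe on the super table add its 5 tags -> 4096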
+ tdSql.query("select * from stb_new")
+ tdSql.checkRows(10)
+ tdSql.checkCols(4096)
+ tdSql.query("describe stb_new;")
+ tdSql.checkRows(4096)
+ tdSql.query("select * from stb_new_0")
+ tdSql.checkRows(10)
+ tdSql.checkCols(4091)
+ tdSql.query("describe stb_new_0;")
+ tdSql.checkRows(4096)
+ tdSql.execute("create table stb_new1_1 using stb_new tags(1,2,3,4,5)")
+ tdSql.query("select * from stb_new1_1")
+ tdSql.checkCols(4091)
+ tdSql.query("describe stb_new1_1;")
+ tdSql.checkRows(4096)
+
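+ # stb_mix child tables carry 4092 columns incl. ts; the 4 tags bring the super table to 4096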
+ tdSql.query("select count (tbname) from stb_mix")
+ tdSql.checkData(0, 0, 1)
+
+ tdSql.query("select * from stb_mix")
+ tdSql.checkRows(10)
+ tdSql.checkCols(4096)
+ tdSql.query("describe stb_mix;")
+ tdSql.checkRows(4096)
+ tdSql.query("select * from stb_mix_0")
+ tdSql.checkRows(10)
+ tdSql.checkCols(4092)
+ tdSql.query("describe stb_mix_0;")
+ tdSql.checkRows(4096)
+
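+ # stb_excel loads its rows from the csv sample file; 4091 data columns + ts = 4092 per child table, plus 4 tags = 4096 on the super table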
+ tdSql.query("select count (tbname) from stb_excel")
+ tdSql.checkData(0, 0, 1)
+
+ tdSql.query("select * from stb_excel")
+ tdSql.checkRows(10)
+ tdSql.checkCols(4096)
+ tdSql.query("describe stb_excel;")
+ tdSql.checkRows(4096)
+ tdSql.query("select * from stb_excel_0")
+ tdSql.checkRows(10)
+ tdSql.checkCols(4092)
+ tdSql.query("describe stb_excel_0;")
+ tdSql.checkRows(4096)
+ endTime = time.time()
+ print("total time %ds" % (endTime - startTime))
+
+
+ os.system("rm -rf tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py.sql")
+
+
+
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
diff --git a/tests/pytest/tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar16384.json b/tests/pytest/tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar49151.json
similarity index 80%
rename from tests/pytest/tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar16384.json
rename to tests/pytest/tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar49151.json
index cd69badad154c6417d0e8d57f4d252354d40ad6b..4f31351516e927b4ec7638540c0aca70ed54c022 100644
--- a/tests/pytest/tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar16384.json
+++ b/tests/pytest/tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar49151.json
@@ -84,11 +84,37 @@
"columns": [{"type": "BINARY", "len": 16370, "count":1},{"type": "INT"}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
},
+ {
+ "name": "stb3",
+ "child_table_exists":"no",
+ "childtable_count": 1,
+ "childtable_prefix": "stb03_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 12,
+ "data_source": "rand",
+ "insert_mode": "taosc",
+ "insert_rows": 1,
+ "childtable_limit": 0,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "BINARY", "len": 16371, "count":3},{"type": "INT","count":6}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ },
{
"name": "stb2",
"child_table_exists":"no",
"childtable_count": 1,
- "childtable_prefix": "stb01_",
+ "childtable_prefix": "stb02_",
"auto_create_table": "no",
"batch_create_tbl_num": 12,
"data_source": "rand",
@@ -111,10 +137,10 @@
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
},
{
- "name": "stb3",
+ "name": "stb4",
"child_table_exists":"no",
"childtable_count": 1,
- "childtable_prefix": "stb01_",
+ "childtable_prefix": "stb04_",
"auto_create_table": "no",
"batch_create_tbl_num": 12,
"data_source": "rand",
@@ -133,7 +159,7 @@
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
- "columns": [{"type": "BINARY", "len": 16371, "count":1},{"type": "INT"}],
+ "columns": [{"type": "BINARY", "len": 16371, "count":3},{"type": "INT","count":6},{"type": "TINYINT"}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
}]
}]
diff --git a/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNum4096.json b/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNum4096.json
new file mode 100644
index 0000000000000000000000000000000000000000..d9ac2072f1fb5f29f7b5e6540d20d04837e461c2
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNum4096.json
@@ -0,0 +1,62 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 4,
+ "thread_count_create_tbl": 4,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 0,
+ "num_of_records_per_req": 1000,
+ "max_sql_len": 10240000000,
+ "databases": [{
+ "dbinfo": {
+ "name": "db",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 50,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 365,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"no",
+ "childtable_count": 10,
+ "childtable_prefix": "stb00_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "taosc",
+ "insert_rows": 1000,
+ "childtable_limit": 0,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1004}, {"type": "BINARY", "len": 5, "count":3075}, {"type": "BINARY", "len": 32, "count":6}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}]
+ }]
+ }]
+}
diff --git a/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNumLarge1024.json b/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNumLarge4096.json
similarity index 95%
rename from tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNumLarge1024.json
rename to tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNumLarge4096.json
index 42461b2f6fba85093a6a45883608b49277669568..e5e31f75ef2e7ede4a8d1eb202c298c6952559e4 100644
--- a/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNumLarge1024.json
+++ b/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNumLarge4096.json
@@ -55,7 +55,7 @@
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
- "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1005}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1005}, {"type": "BINARY", "len": 5, "count":3075}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}]
}]
}]
diff --git a/tests/pytest/tools/taosdemoAllTest/insertSigcolumnsNum1024.json b/tests/pytest/tools/taosdemoAllTest/insertSigcolumnsNum4096.json
similarity index 96%
rename from tests/pytest/tools/taosdemoAllTest/insertSigcolumnsNum1024.json
rename to tests/pytest/tools/taosdemoAllTest/insertSigcolumnsNum4096.json
index 7c12a62764ecd129342d916092cf732fe202151f..f1aa981508f063adccd4cf2f5c6166a16deb9a23 100644
--- a/tests/pytest/tools/taosdemoAllTest/insertSigcolumnsNum1024.json
+++ b/tests/pytest/tools/taosdemoAllTest/insertSigcolumnsNum4096.json
@@ -55,7 +55,7 @@
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
- "columns": [{"type": "DOUBLE", "count":1024}],
+ "columns": [{"type": "DOUBLE", "count":4096}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}]
}]
}]
diff --git a/tests/pytest/tools/taosdemoAllTest/nano_samples.csv b/tests/pytest/tools/taosdemoAllTest/nano_samples.csv
new file mode 100644
index 0000000000000000000000000000000000000000..5fc779b41b44eda002d246d9554f0abcea03c8d3
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/nano_samples.csv
@@ -0,0 +1,100 @@
+8.855,"binary_str0" ,1626870128248246976
+8.75,"binary_str1" ,1626870128249060032
+5.44,"binary_str2" ,1626870128249067968
+8.45,"binary_str3" ,1626870128249072064
+4.07,"binary_str4" ,1626870128249075904
+6.97,"binary_str5" ,1626870128249078976
+6.86,"binary_str6" ,1626870128249082048
+1.585,"binary_str7" ,1626870128249085120
+1.4,"binary_str8" ,1626870128249087936
+5.135,"binary_str9" ,1626870128249092032
+3.15,"binary_str10" ,1626870128249095104
+1.765,"binary_str11" ,1626870128249097920
+7.71,"binary_str12" ,1626870128249100992
+3.91,"binary_str13" ,1626870128249104064
+5.615,"binary_str14" ,1626870128249106880
+9.495,"binary_str15" ,1626870128249109952
+3.825,"binary_str16" ,1626870128249113024
+1.94,"binary_str17" ,1626870128249117120
+5.385,"binary_str18" ,1626870128249119936
+7.075,"binary_str19" ,1626870128249123008
+5.715,"binary_str20" ,1626870128249126080
+1.83,"binary_str21" ,1626870128249128896
+6.365,"binary_str22" ,1626870128249131968
+6.55,"binary_str23" ,1626870128249135040
+6.315,"binary_str24" ,1626870128249138112
+3.82,"binary_str25" ,1626870128249140928
+2.455,"binary_str26" ,1626870128249145024
+7.795,"binary_str27" ,1626870128249148096
+2.47,"binary_str28" ,1626870128249150912
+1.37,"binary_str29" ,1626870128249155008
+5.39,"binary_str30" ,1626870128249158080
+5.13,"binary_str31" ,1626870128249160896
+4.09,"binary_str32" ,1626870128249163968
+5.855,"binary_str33" ,1626870128249167040
+0.17,"binary_str34" ,1626870128249170112
+1.955,"binary_str35" ,1626870128249173952
+0.585,"binary_str36" ,1626870128249178048
+0.33,"binary_str37" ,1626870128249181120
+7.925,"binary_str38" ,1626870128249183936
+9.685,"binary_str39" ,1626870128249187008
+2.6,"binary_str40" ,1626870128249191104
+5.705,"binary_str41" ,1626870128249193920
+3.965,"binary_str42" ,1626870128249196992
+4.43,"binary_str43" ,1626870128249200064
+8.73,"binary_str44" ,1626870128249202880
+3.105,"binary_str45" ,1626870128249205952
+9.39,"binary_str46" ,1626870128249209024
+2.825,"binary_str47" ,1626870128249212096
+9.675,"binary_str48" ,1626870128249214912
+9.99,"binary_str49" ,1626870128249217984
+4.51,"binary_str50" ,1626870128249221056
+4.94,"binary_str51" ,1626870128249223872
+7.72,"binary_str52" ,1626870128249226944
+4.135,"binary_str53" ,1626870128249231040
+2.325,"binary_str54" ,1626870128249234112
+4.585,"binary_str55" ,1626870128249236928
+8.76,"binary_str56" ,1626870128249240000
+4.715,"binary_str57" ,1626870128249243072
+0.56,"binary_str58" ,1626870128249245888
+5.35,"binary_str59" ,1626870128249249984
+5.075,"binary_str60" ,1626870128249253056
+6.665,"binary_str61" ,1626870128249256128
+7.13,"binary_str62" ,1626870128249258944
+2.775,"binary_str63" ,1626870128249262016
+5.775,"binary_str64" ,1626870128249265088
+1.62,"binary_str65" ,1626870128249267904
+1.625,"binary_str66" ,1626870128249270976
+8.15,"binary_str67" ,1626870128249274048
+0.75,"binary_str68" ,1626870128249277120
+3.265,"binary_str69" ,1626870128249280960
+8.585,"binary_str70" ,1626870128249284032
+1.88,"binary_str71" ,1626870128249287104
+8.44,"binary_str72" ,1626870128249289920
+5.12,"binary_str73" ,1626870128249295040
+2.58,"binary_str74" ,1626870128249298112
+9.42,"binary_str75" ,1626870128249300928
+1.765,"binary_str76" ,1626870128249304000
+2.66,"binary_str77" ,1626870128249308096
+1.405,"binary_str78" ,1626870128249310912
+5.595,"binary_str79" ,1626870128249315008
+2.28,"binary_str80" ,1626870128249318080
+9.24,"binary_str81" ,1626870128249320896
+9.03,"binary_str82" ,1626870128249323968
+6.055,"binary_str83" ,1626870128249327040
+1.74,"binary_str84" ,1626870128249330112
+5.77,"binary_str85" ,1626870128249332928
+1.97,"binary_str86" ,1626870128249336000
+0.3,"binary_str87" ,1626870128249339072
+7.145,"binary_str88" ,1626870128249342912
+0.88,"binary_str89" ,1626870128249345984
+8.025,"binary_str90" ,1626870128249349056
+4.81,"binary_str91" ,1626870128249351872
+0.725,"binary_str92" ,1626870128249355968
+3.85,"binary_str93" ,1626870128249359040
+9.455,"binary_str94" ,1626870128249362112
+2.265,"binary_str95" ,1626870128249364928
+3.985,"binary_str96" ,1626870128249368000
+9.375,"binary_str97" ,1626870128249371072
+0.2,"binary_str98" ,1626870128249373888
+6.95,"binary_str99" ,1626870128249377984
diff --git a/tests/pytest/tools/taosdemoAllTest/nano_sampletags.csv b/tests/pytest/tools/taosdemoAllTest/nano_sampletags.csv
new file mode 100644
index 0000000000000000000000000000000000000000..18fb855d6d9f55c29325c6ea6f77120effa72884
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/nano_sampletags.csv
@@ -0,0 +1,100 @@
+"string0",7,8.615
+"string1",4,9.895
+"string2",3,2.92
+"string3",3,5.62
+"string4",7,1.615
+"string5",6,1.45
+"string6",5,7.48
+"string7",7,3.01
+"string8",5,4.76
+"string9",10,7.09
+"string10",2,8.38
+"string11",7,8.65
+"string12",5,5.025
+"string13",10,5.765
+"string14",2,4.57
+"string15",2,1.03
+"string16",7,6.98
+"string17",10,0.23
+"string18",7,5.815
+"string19",1,2.37
+"string20",10,8.865
+"string21",3,1.235
+"string22",2,8.62
+"string23",9,1.045
+"string24",8,4.34
+"string25",1,5.455
+"string26",2,4.475
+"string27",1,6.95
+"string28",2,3.39
+"string29",3,6.79
+"string30",7,9.735
+"string31",1,9.79
+"string32",10,9.955
+"string33",1,5.095
+"string34",3,3.86
+"string35",9,5.105
+"string36",10,4.22
+"string37",1,2.78
+"string38",9,6.345
+"string39",1,0.975
+"string40",5,6.16
+"string41",4,7.735
+"string42",5,6.6
+"string43",8,2.845
+"string44",1,0.655
+"string45",3,2.995
+"string46",9,3.6
+"string47",8,3.47
+"string48",3,7.98
+"string49",6,2.225
+"string50",9,5.44
+"string51",4,6.335
+"string52",3,2.955
+"string53",1,0.565
+"string54",6,5.575
+"string55",6,9.905
+"string56",9,6.025
+"string57",8,0.94
+"string58",10,0.15
+"string59",8,1.555
+"string60",4,2.28
+"string61",2,8.29
+"string62",9,6.22
+"string63",6,3.35
+"string64",10,6.7
+"string65",3,9.345
+"string66",7,9.815
+"string67",1,5.365
+"string68",10,3.81
+"string69",1,6.405
+"string70",8,2.715
+"string71",3,8.58
+"string72",8,6.34
+"string73",2,7.49
+"string74",4,8.64
+"string75",3,8.995
+"string76",7,3.465
+"string77",1,7.64
+"string78",6,3.65
+"string79",6,1.4
+"string80",6,5.875
+"string81",2,1.22
+"string82",5,7.87
+"string83",9,8.41
+"string84",9,8.9
+"string85",9,3.89
+"string86",2,5.0
+"string87",2,4.495
+"string88",4,2.835
+"string89",3,5.895
+"string90",7,8.41
+"string91",5,5.125
+"string92",7,9.165
+"string93",5,8.315
+"string94",10,7.485
+"string95",7,4.635
+"string96",2,6.015
+"string97",8,0.595
+"string98",3,8.79
+"string99",4,1.72
diff --git a/tests/pytest/tools/taosdemoAllTest/querrThreads0.json b/tests/pytest/tools/taosdemoAllTest/querrThreads0.json
index 69557a784180acec3c6de059b9285df4d4b31456..3999845dec12042eecd031a4731f3aa8403d067d 100644
--- a/tests/pytest/tools/taosdemoAllTest/querrThreads0.json
+++ b/tests/pytest/tools/taosdemoAllTest/querrThreads0.json
@@ -7,7 +7,7 @@
"password": "taosdata",
"confirm_parameter_prompt": "no",
"databases": "db",
- "query_times":3,
+ "query_times": 3,
"specified_table_query": {
"query_interval": 0,
"concurrent": 1,
@@ -34,4 +34,4 @@
]
}
}
-
\ No newline at end of file
+
diff --git a/tests/pytest/tools/taosdemoAllTest/querrThreadsless0.json b/tests/pytest/tools/taosdemoAllTest/querrThreadsless0.json
index 9074ae8fd1049d2dbaedfff881feefd84583ca20..646cbcfbe21a7fa7fd6f305eadda63fdce00dcf5 100644
--- a/tests/pytest/tools/taosdemoAllTest/querrThreadsless0.json
+++ b/tests/pytest/tools/taosdemoAllTest/querrThreadsless0.json
@@ -7,7 +7,7 @@
"password": "taosdata",
"confirm_parameter_prompt": "no",
"databases": "db",
- "query_times":3,
+ "query_times": 3,
"specified_table_query": {
"query_interval": 0,
"concurrent": 1,
@@ -34,4 +34,4 @@
]
}
}
-
\ No newline at end of file
+
diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-1s1tnt1r-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-1s1tnt1r-stmt.json
new file mode 100644
index 0000000000000000000000000000000000000000..b3e1024647ff14d0a4a47759e0c9aceab0ac5240
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-1s1tnt1r-stmt.json
@@ -0,0 +1,88 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 4,
+ "thread_count_create_tbl": 4,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 10,
+ "num_of_records_per_req": 1,
+ "max_sql_len": 1024000,
+ "databases": [{
+ "dbinfo": {
+ "name": "db",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 50,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 365,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"no",
+ "childtable_count": 1000,
+ "childtable_prefix": "stb00_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 1,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 100,
+ "childtable_limit": 0,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
+ },
+ {
+ "name": "stb1",
+ "child_table_exists":"no",
+ "childtable_count": 1000,
+ "childtable_prefix": "stb01_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 200,
+ "childtable_limit": 0,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
+ }]
+ }]
+}
diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-1s1tntmr-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-1s1tntmr-stmt.json
new file mode 100644
index 0000000000000000000000000000000000000000..26d483f57da2c30c7ab5d466f6b0b2cb3e5450b0
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-1s1tntmr-stmt.json
@@ -0,0 +1,88 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 4,
+ "thread_count_create_tbl": 4,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 10,
+ "num_of_records_per_req": 1000,
+ "max_sql_len": 1024000,
+ "databases": [{
+ "dbinfo": {
+ "name": "db",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 50,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 365,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"no",
+ "childtable_count": 10,
+ "childtable_prefix": "stb00_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 10000,
+ "childtable_limit": 0,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
+ },
+ {
+ "name": "stb1",
+ "child_table_exists":"no",
+ "childtable_count": 20,
+ "childtable_prefix": "stb01_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 20000,
+ "childtable_limit": 0,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
+ }]
+ }]
+}
diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-disorder-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-disorder-stmt.json
new file mode 100644
index 0000000000000000000000000000000000000000..b1cd882bbf38545d1a3e7d4999fc4f6e0d5c4025
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-disorder-stmt.json
@@ -0,0 +1,88 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 4,
+ "thread_count_create_tbl": 4,
+ "result_file":"./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 10,
+ "num_of_records_per_req": 1000,
+ "max_sql_len": 1024000,
+ "databases": [{
+ "dbinfo": {
+ "name": "db",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 50,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 365,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"no",
+ "childtable_count": 1,
+ "childtable_prefix": "stb00_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 10,
+ "childtable_limit": -1,
+ "childtable_offset": 0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 1,
+ "insert_interval": 0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 10,
+ "disorder_range": 100,
+ "timestamp_step": 1000,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ },
+ {
+ "name": "stb1",
+ "child_table_exists":"no",
+ "childtable_count":1,
+ "childtable_prefix": "stb01_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 1,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 10,
+ "childtable_limit": -1,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 100,
+ "disorder_range": 1,
+ "timestamp_step": 1000,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ }]
+ }]
+}
diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-drop-exist-auto-N00-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-drop-exist-auto-N00-stmt.json
new file mode 100644
index 0000000000000000000000000000000000000000..e541d663fc9f884a7206592271d5124da7746793
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-drop-exist-auto-N00-stmt.json
@@ -0,0 +1,181 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 4,
+ "thread_count_create_tbl": 4,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 100,
+ "num_of_records_per_req": 100,
+ "databases": [{
+ "dbinfo": {
+ "name": "db",
+ "drop": "no",
+ "replica": 1,
+ "days": 10,
+ "cache": 16,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 3650,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb",
+ "child_table_exists":"no",
+ "auto_create_table": "123",
+ "childtable_count": 20,
+ "childtable_prefix": "NN123_",
+ "batch_create_tbl_num": 100,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 5,
+ "childtable_limit": 40,
+ "childtable_offset":0,
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 10,
+ "start_timestamp": "now",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}],
+ "tags": [{"type": "TINYINT"}]
+ },{
+ "name": "stb",
+ "child_table_exists":"no",
+ "auto_create_table": "no",
+ "childtable_count": 20,
+ "childtable_prefix": "NNN_",
+ "batch_create_tbl_num": 100,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 5,
+ "childtable_limit": 40,
+ "childtable_offset":0,
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 10,
+ "start_timestamp": "now",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}],
+ "tags": [{"type": "TINYINT"}]
+ },{
+ "name": "stb",
+ "child_table_exists":"no",
+ "auto_create_table": "yes",
+ "childtable_count": 20,
+ "childtable_prefix": "NNY_",
+ "batch_create_tbl_num": 100,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 5,
+ "childtable_limit": 40,
+ "childtable_offset":0,
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 10,
+ "start_timestamp": "now",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}],
+ "tags": [{"type": "TINYINT"}]
+ },{
+ "name": "stb",
+ "child_table_exists":"yes",
+ "auto_create_table": "123",
+ "childtable_count": 20,
+ "childtable_prefix": "NY123_",
+ "batch_create_tbl_num": 100,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 5,
+ "childtable_limit": 40,
+ "childtable_offset":0,
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 10,
+ "start_timestamp": "now",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}],
+ "tags": [{"type": "TINYINT"}]
+ },{
+ "name": "stb",
+ "child_table_exists":"yes",
+ "auto_create_table": "no",
+ "childtable_count": 20,
+ "childtable_prefix": "NYN_",
+ "batch_create_tbl_num": 100,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 5,
+ "childtable_limit": 40,
+ "childtable_offset":0,
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 10,
+ "start_timestamp": "now",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}],
+ "tags": [{"type": "TINYINT"}]
+ },{
+ "name": "stb",
+ "child_table_exists":"yes",
+ "auto_create_table": "yes",
+ "childtable_count": 20,
+ "childtable_prefix": "NYY_",
+ "batch_create_tbl_num": 100,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 5,
+ "childtable_limit": 40,
+ "childtable_offset":0,
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 10,
+ "start_timestamp": "now",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}],
+ "tags": [{"type": "TINYINT"}]
+ }
+ ]
+ }]
+}
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-drop-exist-auto-Y00-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-drop-exist-auto-Y00-stmt.json
new file mode 100644
index 0000000000000000000000000000000000000000..f32d44240d7f5b717013878358e5d4db378ba354
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-drop-exist-auto-Y00-stmt.json
@@ -0,0 +1,181 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 4,
+ "thread_count_create_tbl": 4,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 100,
+ "num_of_records_per_req": 100,
+ "databases": [{
+ "dbinfo": {
+ "name": "db",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 16,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 3650,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb",
+ "child_table_exists":"no",
+ "auto_create_table": "123",
+ "childtable_count": 20,
+ "childtable_prefix": "YN123_",
+ "batch_create_tbl_num": 100,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 5,
+ "childtable_limit": 40,
+ "childtable_offset":0,
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 10,
+ "start_timestamp": "now",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}],
+ "tags": [{"type": "TINYINT"}]
+ },{
+ "name": "stb",
+ "child_table_exists":"no",
+ "auto_create_table": "no",
+ "childtable_count": 20,
+ "childtable_prefix": "YNN_",
+ "batch_create_tbl_num": 100,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 5,
+ "childtable_limit": 40,
+ "childtable_offset":0,
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 10,
+ "start_timestamp": "now",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}],
+ "tags": [{"type": "TINYINT"}]
+ },{
+ "name": "stb",
+ "child_table_exists":"no",
+ "auto_create_table": "yes",
+ "childtable_count": 20,
+ "childtable_prefix": "YNY_",
+ "batch_create_tbl_num": 100,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 5,
+ "childtable_limit": 40,
+ "childtable_offset":0,
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 10,
+ "start_timestamp": "now",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}],
+ "tags": [{"type": "TINYINT"}]
+ },{
+ "name": "stb",
+ "child_table_exists":"yes",
+ "auto_create_table": "123",
+ "childtable_count": 20,
+ "childtable_prefix": "YY123_",
+ "batch_create_tbl_num": 100,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 5,
+ "childtable_limit": 40,
+ "childtable_offset":0,
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 10,
+ "start_timestamp": "now",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}],
+ "tags": [{"type": "TINYINT"}]
+ },{
+ "name": "stb",
+ "child_table_exists":"yes",
+ "auto_create_table": "no",
+ "childtable_count": 20,
+ "childtable_prefix": "YYN_",
+ "batch_create_tbl_num": 100,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 5,
+ "childtable_limit": 40,
+ "childtable_offset":0,
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 10,
+ "start_timestamp": "now",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}],
+ "tags": [{"type": "TINYINT"}]
+ },{
+ "name": "stb",
+ "child_table_exists":"yes",
+ "auto_create_table": "yes",
+ "childtable_count": 20,
+ "childtable_prefix": "YYY_",
+ "batch_create_tbl_num": 100,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 5,
+ "childtable_limit": 40,
+ "childtable_offset":0,
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 10,
+ "start_timestamp": "now",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}],
+ "tags": [{"type": "TINYINT"}]
+ }
+ ]
+ }]
+}
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-interlace-row-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-interlace-row-stmt.json
new file mode 100644
index 0000000000000000000000000000000000000000..c9d93c2423612b3fb4c6ab1f2b5d577f3c64e8cd
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-interlace-row-stmt.json
@@ -0,0 +1,62 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 4,
+ "thread_count_create_tbl": 4,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 100,
+ "num_of_records_per_req": 1000,
+ "max_sql_len": 1024000,
+ "databases": [{
+ "dbinfo": {
+ "name": "db",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 50,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 365,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"no",
+ "childtable_count": 100,
+ "childtable_prefix": "stb00_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 20,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 150,
+ "childtable_limit": -1,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 151,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ }]
+ }]
+}
diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-interval-speed-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-interval-speed-stmt.json
new file mode 100644
index 0000000000000000000000000000000000000000..7f94fa2e75b930489dc0106d1796df06af43967f
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-interval-speed-stmt.json
@@ -0,0 +1,88 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 4,
+ "thread_count_create_tbl": 4,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 100,
+ "interlace_rows": 0,
+ "num_of_records_per_req": 2000,
+ "max_sql_len": 1024000,
+ "databases": [{
+ "dbinfo": {
+ "name": "db",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 16,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 365,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"no",
+ "childtable_count": 100,
+ "childtable_prefix": "stb00_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 20000,
+ "childtable_limit": 0,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 1000,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":6}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ },
+ {
+ "name": "stb1",
+ "child_table_exists":"no",
+ "childtable_count": 100,
+ "childtable_prefix": "stb01_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 20000,
+ "childtable_limit": 0,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 1000,
+ "insert_interval": 200,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
+ }]
+ }]
+}
diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-newdb-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-newdb-stmt.json
new file mode 100644
index 0000000000000000000000000000000000000000..339a2555c87f01b8ec6ce84f018dd4787f39d7fd
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-newdb-stmt.json
@@ -0,0 +1,166 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 4,
+ "thread_count_create_tbl": 4,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 0,
+ "num_of_records_per_req": 3000,
+ "max_sql_len": 1024000,
+ "databases": [{
+ "dbinfo": {
+ "name": "db",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 16,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 365,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 1
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"yes",
+ "childtable_count": 5,
+ "childtable_prefix": "stb00_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 10,
+ "childtable_limit": -1,
+ "childtable_offset": 0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval": 0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ },
+ {
+ "name": "stb1",
+ "child_table_exists":"no",
+ "childtable_count": 6,
+ "childtable_prefix": "stb01_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 20,
+ "childtable_limit": -1,
+ "childtable_offset": 0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval": 0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ },
+ {
+ "name": "stb2",
+ "child_table_exists":"no",
+ "childtable_count": 7,
+ "childtable_prefix": "stb02_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 20,
+ "childtable_limit": 4,
+ "childtable_offset": 0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval": 0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ },
+ {
+ "name": "stb3",
+ "child_table_exists":"no",
+ "childtable_count": 8,
+ "childtable_prefix": "stb03_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 20,
+ "childtable_limit": 2,
+ "childtable_offset": 7,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval": 0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ },
+ {
+ "name": "stb4",
+ "child_table_exists":"no",
+ "childtable_count": 8,
+ "childtable_prefix": "stb04_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 20,
+ "childtable_limit": 0,
+ "childtable_offset": 7,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval": 0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ }]
+ }]
+}
diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-newtable-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-newtable-stmt.json
new file mode 100644
index 0000000000000000000000000000000000000000..7e39ddbc0d6233c23d3eb9d5f34e9f0cc6a64360
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-newtable-stmt.json
@@ -0,0 +1,166 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 4,
+ "thread_count_create_tbl": 4,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 0,
+ "num_of_records_per_req": 3000,
+ "max_sql_len": 1024000,
+ "databases": [{
+ "dbinfo": {
+ "name": "db",
+ "drop": "no",
+ "replica": 1,
+ "days": 10,
+ "cache": 16,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 365,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 1
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"yes",
+ "childtable_count": 5,
+ "childtable_prefix": "stb00_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 20,
+ "childtable_limit": -1,
+ "childtable_offset": 0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval": 0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-12-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ },
+ {
+ "name": "stb1",
+ "child_table_exists":"no",
+ "childtable_count": 6,
+ "childtable_prefix": "stb01_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 20,
+ "childtable_limit": -1,
+ "childtable_offset": 0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval": 0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-12-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ },
+ {
+ "name": "stb2",
+ "child_table_exists":"no",
+ "childtable_count": 7,
+ "childtable_prefix": "stb02_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 20,
+ "childtable_limit": 4,
+ "childtable_offset": 0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval": 0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-12-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ },
+ {
+ "name": "stb3",
+ "child_table_exists":"no",
+ "childtable_count": 8,
+ "childtable_prefix": "stb03_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 20,
+ "childtable_limit": 2,
+ "childtable_offset": 7,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval": 0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-12-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ },
+ {
+ "name": "stb4",
+ "child_table_exists":"no",
+ "childtable_count": 8,
+ "childtable_prefix": "stb04_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 30,
+ "childtable_limit": 0,
+ "childtable_offset": 7,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval": 0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-12-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ }]
+ }]
+}
diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-nodbnodrop-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-nodbnodrop-stmt.json
new file mode 100644
index 0000000000000000000000000000000000000000..e83a04003324149803f040e61fa6750a20b2afbb
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-nodbnodrop-stmt.json
@@ -0,0 +1,62 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 4,
+ "thread_count_create_tbl": 4,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 0,
+ "num_of_records_per_req": 3000,
+ "max_sql_len": 1024000,
+ "databases": [{
+ "dbinfo": {
+ "name": "dbno",
+ "drop": "no",
+ "replica": 1,
+ "days": 10,
+ "cache": 16,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 365,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 1
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"no",
+ "childtable_count": 5,
+ "childtable_prefix": "stb00_",
+ "auto_create_table": "yes",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 10,
+ "childtable_limit": -1,
+ "childtable_offset": 0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval": 0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ }]
+ }]
+}
diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-offset-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-offset-stmt.json
new file mode 100644
index 0000000000000000000000000000000000000000..9502358de0e1eb92730dd6782d21bcaba4f67af5
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-offset-stmt.json
@@ -0,0 +1,166 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 4,
+ "thread_count_create_tbl": 4,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 0,
+ "num_of_records_per_req": 3000,
+ "max_sql_len": 1024000,
+ "databases": [{
+ "dbinfo": {
+ "name": "db",
+ "drop": "no",
+ "replica": 1,
+ "days": 10,
+ "cache": 16,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 365,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"yes",
+ "childtable_count": 5,
+ "childtable_prefix": "stb00_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 20,
+ "childtable_limit": 0,
+ "childtable_offset": 0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval": 0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-11-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ },
+ {
+ "name": "stb1",
+ "child_table_exists":"yes",
+ "childtable_count": 6,
+ "childtable_prefix": "stb01_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 20,
+ "childtable_limit": -1,
+ "childtable_offset": 0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval": 0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-11-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ },
+ {
+ "name": "stb2",
+ "child_table_exists":"yes",
+ "childtable_count": 7,
+ "childtable_prefix": "stb02_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 20,
+ "childtable_limit": 4,
+ "childtable_offset": 0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval": 0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-11-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ },
+ {
+ "name": "stb3",
+ "child_table_exists":"yes",
+ "childtable_count": 8,
+ "childtable_prefix": "stb03_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 20,
+ "childtable_limit": 2,
+ "childtable_offset":7,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval": 0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-11-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ },
+ {
+ "name": "stb4",
+ "child_table_exists":"yes",
+ "childtable_count": 8,
+ "childtable_prefix": "stb04_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 20,
+ "childtable_limit": 0,
+ "childtable_offset": 7,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval": 0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-11-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ }]
+ }]
+}
diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-renewdb-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-renewdb-stmt.json
new file mode 100644
index 0000000000000000000000000000000000000000..5a500a12580e2fbe9aca206f962304f3310adb3f
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-renewdb-stmt.json
@@ -0,0 +1,166 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 4,
+ "thread_count_create_tbl": 4,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 0,
+ "num_of_records_per_req": 3000,
+ "max_sql_len": 1024000,
+ "databases": [{
+ "dbinfo": {
+ "name": "db",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 16,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 365,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 1
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"yes",
+ "childtable_count": 5,
+ "childtable_prefix": "stb00_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 10,
+ "childtable_limit": -1,
+ "childtable_offset": 0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval": 0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ },
+ {
+ "name": "stb1",
+ "child_table_exists":"no",
+ "childtable_count": 6,
+ "childtable_prefix": "stb01_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 20,
+ "childtable_limit": -1,
+ "childtable_offset": 0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval": 0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ },
+ {
+ "name": "stb2",
+ "child_table_exists":"no",
+ "childtable_count": 7,
+ "childtable_prefix": "stb02_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 20,
+ "childtable_limit": 4,
+ "childtable_offset": 0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval": 0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ },
+ {
+ "name": "stb3",
+ "child_table_exists":"no",
+ "childtable_count": 8,
+ "childtable_prefix": "stb03_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 20,
+ "childtable_limit": 2,
+ "childtable_offset": 7,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval": 0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ },
+ {
+ "name": "stb4",
+ "child_table_exists":"no",
+ "childtable_count": 8,
+ "childtable_prefix": "stb04_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 20,
+ "childtable_limit": 0,
+ "childtable_offset": 7,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval": 0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ }]
+ }]
+}
diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-sample-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-sample-stmt.json
new file mode 100644
index 0000000000000000000000000000000000000000..c3f11bf03dad7b7bbc25e2af16488bbd0719bf02
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-sample-stmt.json
@@ -0,0 +1,88 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 4,
+ "thread_count_create_tbl": 4,
+ "result_file":"./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 10,
+ "num_of_records_per_req": 1000,
+ "max_sql_len": 1024000,
+ "databases": [{
+ "dbinfo": {
+ "name": "dbtest123",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 50,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 365,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"no",
+ "childtable_count": 1,
+ "childtable_prefix": "stb00_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "sample",
+ "insert_mode": "stmt",
+ "insert_rows": 10,
+ "childtable_limit": -1,
+ "childtable_offset": 0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval": 0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./tools/taosdemoAllTest/sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT", "count":3}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}, {"type": "BOOL"}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ },
+ {
+ "name": "stb1",
+ "child_table_exists":"no",
+ "childtable_count":2,
+ "childtable_prefix": "stb01_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 10,
+ "childtable_limit": -1,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 10,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "./tools/taosdemoAllTest/tags.csv",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
+ "tags": [{"type": "TINYINT", "count":3}, {"type": "BINARY", "len": 16, "count":2}]
+ }]
+ }]
+}
diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-timestep-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-timestep-stmt.json
new file mode 100644
index 0000000000000000000000000000000000000000..d2143366d7c3928495d5a4ef6f83edb5014670f4
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-timestep-stmt.json
@@ -0,0 +1,88 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 4,
+ "thread_count_create_tbl": 4,
+ "result_file":"./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 10,
+ "num_of_records_per_req": 1000,
+ "max_sql_len": 1024000,
+ "databases": [{
+ "dbinfo": {
+ "name": "db",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 50,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 365,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"no",
+ "childtable_count": 10,
+ "childtable_prefix": "stb00_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 20,
+ "childtable_limit": 0,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ },
+ {
+ "name": "stb1",
+ "child_table_exists":"no",
+ "childtable_count":20,
+ "childtable_prefix": "stb01_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 20,
+ "childtable_limit": 0,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 10,
+ "start_timestamp": "2020-11-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ }]
+ }]
+}
diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertBinaryLenLarge16374AllcolLar49151-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertBinaryLenLarge16374AllcolLar49151-stmt.json
new file mode 100644
index 0000000000000000000000000000000000000000..c6909c6278cdbc6fd85eea04fb7e4e859f6df5cd
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertBinaryLenLarge16374AllcolLar49151-stmt.json
@@ -0,0 +1,166 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 4,
+ "thread_count_create_tbl": 4,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 10,
+ "num_of_records_per_req": 10240000000,
+ "max_sql_len": 10240000000,
+ "databases": [{
+ "dbinfo": {
+ "name": "db",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 50,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 365,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"no",
+ "childtable_count": 1,
+ "childtable_prefix": "stb00_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 1,
+ "childtable_limit": 0,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "BINARY", "len": 16374, "count":1}],
+ "tags": [{"type": "TINYINT", "count":12}, {"type": "BINARY", "len": 16, "count":2}]
+ },
+ {
+ "name": "stb1",
+ "child_table_exists":"no",
+ "childtable_count": 1,
+ "childtable_prefix": "stb01_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 12,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 1,
+ "childtable_limit": 0,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "BINARY", "len": 16370, "count":1},{"type": "INT"}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ },
+ {
+ "name": "stb3",
+ "child_table_exists":"no",
+ "childtable_count": 1,
+ "childtable_prefix": "stb03_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 12,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 1,
+ "childtable_limit": 0,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "BINARY", "len": 16371, "count":3},{"type": "INT","count":6}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ },
+ {
+ "name": "stb2",
+ "child_table_exists":"no",
+ "childtable_count": 1,
+ "childtable_prefix": "stb02_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 12,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 1,
+ "childtable_limit": 0,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "BINARY", "len": 16375, "count":1},{"type": "INT"}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ },
+ {
+ "name": "stb4",
+ "child_table_exists":"no",
+ "childtable_count": 1,
+ "childtable_prefix": "stb04_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 12,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 1,
+ "childtable_limit": 0,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 100,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "BINARY", "len": 16371, "count":3},{"type": "INT","count":6},{"type": "TINYINT"}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ }]
+ }]
+}
diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertChildTab0-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertChildTab0-stmt.json
new file mode 100644
index 0000000000000000000000000000000000000000..a5cc009ffb4a5f769d63b8fc4ad1d74f04a76c4b
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertChildTab0-stmt.json
@@ -0,0 +1,88 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 4,
+ "thread_count_create_tbl": 4,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 10,
+ "num_of_records_per_req": 10,
+ "max_sql_len": 10240000000,
+ "databases": [{
+ "dbinfo": {
+ "name": "db",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 50,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 365,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"no",
+ "childtable_count": 0,
+ "childtable_prefix": "stb00_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 1,
+ "childtable_limit": 0,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "BINARY", "len": 1, "count":1}],
+ "tags": [{"type": "TINYINT", "count":1}, {"type": "BINARY", "len": 16, "count":2}]
+ },
+ {
+ "name": "stb1",
+ "child_table_exists":"no",
+ "childtable_count": 10,
+ "childtable_prefix": "stb01_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 12,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 2,
+ "childtable_limit": 0,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "BINARY", "len": 1, "count":1}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ }]
+ }]
+}
diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertChildTabLess0-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertChildTabLess0-stmt.json
new file mode 100644
index 0000000000000000000000000000000000000000..d9678a58692af75e06c77451028151658f812a77
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertChildTabLess0-stmt.json
@@ -0,0 +1,88 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 4,
+ "thread_count_create_tbl": 4,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 10,
+ "num_of_records_per_req": 10,
+ "max_sql_len": 10240000000,
+ "databases": [{
+ "dbinfo": {
+ "name": "db",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 50,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 365,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"no",
+ "childtable_count": -1,
+ "childtable_prefix": "stb00_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 1,
+ "childtable_limit": 0,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "BINARY", "len": 1, "count":1}],
+ "tags": [{"type": "TINYINT", "count":1}, {"type": "BINARY", "len": 16, "count":2}]
+ },
+ {
+ "name": "stb1",
+ "child_table_exists":"no",
+ "childtable_count": 10,
+ "childtable_prefix": "stb01_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 12,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 2,
+ "childtable_limit": 0,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "BINARY", "len": 1, "count":1}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ }]
+ }]
+}
diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertColumnsAndTagNum4096-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertColumnsAndTagNum4096-stmt.json
new file mode 100644
index 0000000000000000000000000000000000000000..a448750f74b5ad7219c5f29d744729777f497053
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertColumnsAndTagNum4096-stmt.json
@@ -0,0 +1,62 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 4,
+ "thread_count_create_tbl": 4,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 0,
+ "num_of_records_per_req": 1000,
+ "max_sql_len": 10240000000,
+ "databases": [{
+ "dbinfo": {
+ "name": "db",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 50,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 365,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"no",
+ "childtable_count": 10,
+ "childtable_prefix": "stb00_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 1000,
+ "childtable_limit": 0,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1004}, {"type": "BINARY", "len": 5, "count":3075}, {"type": "BINARY", "len": 32, "count":6}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}]
+ }]
+ }]
+}
diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertColumnsNum0-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertColumnsNum0-stmt.json
new file mode 100644
index 0000000000000000000000000000000000000000..4ec18c49d6c4614f55947d5ab3b9d9a9a84579af
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertColumnsNum0-stmt.json
@@ -0,0 +1,62 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 4,
+ "thread_count_create_tbl": 4,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 10,
+ "num_of_records_per_req": 100,
+ "max_sql_len": 10240000000,
+ "databases": [{
+ "dbinfo": {
+ "name": "db",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 50,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 365,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"no",
+ "childtable_count": 10,
+ "childtable_prefix": "stb00_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 1000,
+ "childtable_limit": 0,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":0}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}]
+ }]
+ }]
+}
diff --git a/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNum1024.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertInterlaceRowsLarge1M-stmt.json
similarity index 96%
rename from tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNum1024.json
rename to tests/pytest/tools/taosdemoAllTest/stmt/insertInterlaceRowsLarge1M-stmt.json
index 42f6ef2f2fe90f7eac23778542475f152794a509..c9dad3dc7f95a7b95682621103c945dff395d3b5 100644
--- a/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNum1024.json
+++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertInterlaceRowsLarge1M-stmt.json
@@ -40,12 +40,12 @@
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
- "insert_mode": "taosc",
+ "insert_mode": "stmt",
"insert_rows": 1000,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
- "interlace_rows": 0,
+ "interlace_rows": 1000,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertMaxNumPerReq-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertMaxNumPerReq-stmt.json
new file mode 100644
index 0000000000000000000000000000000000000000..00c346678f884a06a0611116ad13e47117bad59f
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertMaxNumPerReq-stmt.json
@@ -0,0 +1,86 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 4,
+ "thread_count_create_tbl": 4,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 50000,
+ "num_of_records_per_req": 50000,
+ "max_sql_len": 1025000,
+ "databases": [{
+ "dbinfo": {
+ "name": "db",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 50,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 3650,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"no",
+ "childtable_count": 100,
+ "childtable_prefix": "stb00_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 100,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows":50000,
+ "childtable_limit": -1,
+ "childtable_offset":0,
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1025000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2012-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "TINYINT", "count":1}],
+ "tags": [{"type": "TINYINT", "count":1}]
+ },
+ {
+ "name": "stb1",
+ "child_table_exists":"no",
+ "childtable_count": 100,
+ "childtable_prefix": "stb01_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 100,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows":50000,
+ "childtable_limit": -1,
+ "childtable_offset":0,
+ "interlace_rows": 32767,
+ "insert_interval":0,
+ "max_sql_len": 1025000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2012-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "TINYINT", "count":1}],
+ "tags": [{"type": "TINYINT", "count":1}]
+ }]
+ }]
+}
diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertNumOfrecordPerReq0-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertNumOfrecordPerReq0-stmt.json
new file mode 100644
index 0000000000000000000000000000000000000000..4e47b3b404847a267f47413f6ab297e35cc84b0b
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertNumOfrecordPerReq0-stmt.json
@@ -0,0 +1,88 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 4,
+ "thread_count_create_tbl": 4,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 10,
+ "num_of_records_per_req": 0,
+ "max_sql_len": 10240000000,
+ "databases": [{
+ "dbinfo": {
+ "name": "db",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 50,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 365,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"no",
+ "childtable_count": 1,
+ "childtable_prefix": "stb00_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 1,
+ "childtable_limit": 0,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "BINARY", "len": 1, "count":1}],
+ "tags": [{"type": "TINYINT", "count":1}, {"type": "BINARY", "len": 16, "count":2}]
+ },
+ {
+ "name": "stb1",
+ "child_table_exists":"no",
+ "childtable_count": 2,
+ "childtable_prefix": "stb01_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 12,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 2,
+ "childtable_limit": 0,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "BINARY", "len": 1, "count":1}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ }]
+ }]
+}
diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertNumOfrecordPerReqless0-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertNumOfrecordPerReqless0-stmt.json
new file mode 100644
index 0000000000000000000000000000000000000000..28e7bbb39bb5d2477842129936ed6584e617e25a
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertNumOfrecordPerReqless0-stmt.json
@@ -0,0 +1,88 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 4,
+ "thread_count_create_tbl": 4,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 10,
+ "num_of_records_per_req": -1,
+ "max_sql_len": 10240000000,
+ "databases": [{
+ "dbinfo": {
+ "name": "db",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 50,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 365,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"no",
+ "childtable_count": 1,
+ "childtable_prefix": "stb00_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 1,
+ "childtable_limit": 0,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "BINARY", "len": 1, "count":1}],
+ "tags": [{"type": "TINYINT", "count":1}, {"type": "BINARY", "len": 16, "count":2}]
+ },
+ {
+ "name": "stb1",
+ "child_table_exists":"no",
+ "childtable_count": 2,
+ "childtable_prefix": "stb01_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 12,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 2,
+ "childtable_limit": 0,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "BINARY", "len": 1, "count":1}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ }]
+ }]
+}
diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertSigcolumnsNum4096-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertSigcolumnsNum4096-stmt.json
new file mode 100644
index 0000000000000000000000000000000000000000..39e38afefd7060b6c6a0241521029e84816b999b
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertSigcolumnsNum4096-stmt.json
@@ -0,0 +1,62 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 4,
+ "thread_count_create_tbl": 4,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 10,
+ "num_of_records_per_req": 100,
+ "max_sql_len": 10240000000,
+ "databases": [{
+ "dbinfo": {
+ "name": "db",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 50,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 365,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"no",
+ "childtable_count": 10,
+ "childtable_prefix": "stb00_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 1000,
+ "childtable_limit": 0,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "DOUBLE", "count":4096}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}]
+ }]
+ }]
+}
diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertTagsNumLarge128-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertTagsNumLarge128-stmt.json
new file mode 100644
index 0000000000000000000000000000000000000000..f219d3c7a57146a075599eff495ffe93533373ef
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertTagsNumLarge128-stmt.json
@@ -0,0 +1,62 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 4,
+ "thread_count_create_tbl": 4,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 10,
+ "num_of_records_per_req": 1000000,
+ "max_sql_len": 1024000000,
+ "databases": [{
+ "dbinfo": {
+ "name": "db1",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 50,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 365,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"no",
+ "childtable_count": 10,
+ "childtable_prefix": "stb00_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 1000,
+ "childtable_limit": 0,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 10000,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BIGINT", "count":1}, {"type": "float", "count":1}, {"type": "double", "count":1}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
+ "tags": [{"type": "TINYINT", "count":127}, {"type": "BINARY", "len": 16, "count":2}]
+ }]
+ }]
+}
diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertTimestepMulRowsLargeint16-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertTimestepMulRowsLargeint16-stmt.json
new file mode 100644
index 0000000000000000000000000000000000000000..ed3eb280f6869bed76de72bdf50b646bca4a245a
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertTimestepMulRowsLargeint16-stmt.json
@@ -0,0 +1,65 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "localhost",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 4,
+ "thread_count_create_tbl": 4,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "databases": [
+ {
+ "dbinfo": {
+ "name": "blf",
+ "drop": "yes"
+ },
+ "super_tables": [
+ {
+ "name": "p_0_topics",
+ "child_table_exists": "no",
+ "childtable_count": 10,
+ "childtable_prefix": "p_0_topics_",
+ "auto_create_table": "no",
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 525600,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 1000,
+ "max_sql_len": 1048576,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 60000,
+ "start_timestamp": "2019-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [
+ {
+ "type": "INT",
+ "count": 1
+ },
+ {
+ "type": "FLOAT",
+ "count": 1
+ },
+ {
+ "type": "BINARY",
+ "len": 12,
+ "count": 1
+ }
+ ],
+ "tags": [
+ {
+ "type": "BINARY",
+ "len": 12,
+ "count": 10
+ }
+ ]
+ }
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/nsertColumnsAndTagNumLarge4096-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/nsertColumnsAndTagNumLarge4096-stmt.json
new file mode 100644
index 0000000000000000000000000000000000000000..2105398d55b80f14f2fcfcd08f752333e27c031c
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/stmt/nsertColumnsAndTagNumLarge4096-stmt.json
@@ -0,0 +1,62 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 4,
+ "thread_count_create_tbl": 4,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 10000,
+ "num_of_records_per_req": 10000,
+ "max_sql_len": 10240000000,
+ "databases": [{
+ "dbinfo": {
+ "name": "db",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 50,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 365,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"no",
+ "childtable_count": 10,
+ "childtable_prefix": "stb00_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "stmt",
+ "insert_rows": 1000,
+ "childtable_limit": 0,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1005}, {"type": "BINARY", "len": 16, "count":3075}, {"type": "BINARY", "len": 32, "count":6}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}]
+ }]
+ }]
+}
diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoInsertMSDB.json b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertMSDB.json
new file mode 100644
index 0000000000000000000000000000000000000000..49ab6f3a4367b4cebd840bb24b43a5d190c0d464
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertMSDB.json
@@ -0,0 +1,63 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 10,
+ "thread_count_create_tbl": 10,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 100,
+ "num_of_records_per_req": 1000,
+ "max_sql_len": 1024000,
+ "databases": [{
+ "dbinfo": {
+ "name": "testdb3",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 50,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 36,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"no",
+ "childtable_count": 100,
+ "childtable_prefix": "tb0_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 20,
+ "data_source": "rand",
+ "insert_mode": "taosc",
+ "insert_rows": 100,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1000,
+ "start_timestamp": "2021-07-01 00:00:00.000",
+ "sample_format": "",
+ "sample_file": "",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2},
+ {"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1},
+ {"type": "BOOL"},{"type": "NCHAR","len":16}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}]
+ }]
+ }]
+}
+
diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoInsertNanoDB.json b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertNanoDB.json
new file mode 100644
index 0000000000000000000000000000000000000000..9a35df917dcbb2600852e8172da0be3ffacb0d15
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertNanoDB.json
@@ -0,0 +1,63 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 10,
+ "thread_count_create_tbl": 10,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 100,
+ "num_of_records_per_req": 1000,
+ "max_sql_len": 1024000,
+ "databases": [{
+ "dbinfo": {
+ "name": "testdb1",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 50,
+ "blocks": 8,
+ "precision": "ns",
+ "keep": 36,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"no",
+ "childtable_count": 100,
+ "childtable_prefix": "tb0_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 20,
+ "data_source": "rand",
+ "insert_mode": "taosc",
+ "insert_rows": 100,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1000,
+ "start_timestamp": "2021-07-01 00:00:00.000",
+ "sample_format": "",
+ "sample_file": "",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2},
+ {"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1},
+ {"type": "BOOL"},{"type": "NCHAR","len":16}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}]
+ }]
+ }]
+}
+
diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoInsertUSDB.json b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertUSDB.json
new file mode 100644
index 0000000000000000000000000000000000000000..631179dbaebfff29de6b38831b78fede989369d4
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertUSDB.json
@@ -0,0 +1,63 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 10,
+ "thread_count_create_tbl": 10,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 100,
+ "num_of_records_per_req": 1000,
+ "max_sql_len": 1024000,
+ "databases": [{
+ "dbinfo": {
+ "name": "testdb2",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 50,
+ "blocks": 8,
+ "precision": "us",
+ "keep": 36,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"no",
+ "childtable_count": 100,
+ "childtable_prefix": "tb0_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 20,
+ "data_source": "rand",
+ "insert_mode": "taosc",
+ "insert_rows": 100,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1000,
+ "start_timestamp": "2021-07-01 00:00:00.000",
+ "sample_format": "",
+ "sample_file": "",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2},
+ {"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1},
+ {"type": "BOOL"},{"type": "NCHAR","len":16}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}]
+ }]
+ }]
+}
+
diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertTime_step.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertTime_step.py
new file mode 100644
index 0000000000000000000000000000000000000000..7b3b865df91f87622737eede640ec79e880e433b
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertTime_step.py
@@ -0,0 +1,115 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ def getBuildPath(self):
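+ # walk the project tree to find the directory that contains the taosd binary
+ # (skipping any "packaging" path), then strip the trailing "/build/bin" to get the build root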
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosd" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root)-len("/build/bin")]
+ break
+ return buildPath
+
+ def run(self):
+ buildPath = self.getBuildPath()
+ if (buildPath == ""):
+ tdLog.exit("taosd not found!")
+ else:
+ tdLog.info("taosd found in %s" % buildPath)
+ binPath = buildPath+ "/build/bin/"
+
+ # insert: create one or multiple tables per sql and insert multiple rows per sql
+
+ # check taosdemo's timestamp_step handling when the database precision is ns
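+ # taosdemoInsertNanoDB.json uses an ns-precision database with insert_rows 100 and timestamp_step 1000,
+ # so the last timestamp is start + 99 * 1000 ns = 2021-07-01 00:00:00.000099000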
+ os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoInsertNanoDB.json -y " % binPath)
+ tdSql.execute("use testdb1")
+ tdSql.query("show stables")
+ tdSql.checkData(0, 4, 100)
+ tdSql.query("select count (tbname) from stb0")
+ tdSql.checkData(0, 0, 100)
+ tdSql.query("select count(*) from tb0_0")
+ tdSql.checkData(0, 0, 100)
+ tdSql.query("select count(*) from stb0")
+ tdSql.checkData(0, 0, 10000)
+ tdSql.query("describe stb0")
+ tdSql.getData(9, 1)
+ tdSql.checkDataType(9, 1,"TIMESTAMP")
+ tdSql.query("select last(ts) from stb0")
+ tdSql.checkData(0, 0,"2021-07-01 00:00:00.000099000")
+
+ # check taosdemo's timestamp_step handling when the database precision is us
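+ # taosdemoInsertUSDB.json uses a us-precision database with the same 100 rows and timestamp_step 1000,
+ # so the last timestamp is start + 99 * 1000 us = 2021-07-01 00:00:00.099000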
+ os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoInsertUSDB.json -y " % binPath)
+ tdSql.execute("use testdb2")
+ tdSql.query("show stables")
+ tdSql.checkData(0, 4, 100)
+ tdSql.query("select count (tbname) from stb0")
+ tdSql.checkData(0, 0, 100)
+ tdSql.query("select count(*) from tb0_0")
+ tdSql.checkData(0, 0, 100)
+ tdSql.query("select count(*) from stb0")
+ tdSql.checkData(0, 0, 10000)
+ tdSql.query("describe stb0")
+ tdSql.getData(9, 1)
+ tdSql.checkDataType(9, 1,"TIMESTAMP")
+ tdSql.query("select last(ts) from stb0")
+ tdSql.checkData(0, 0,"2021-07-01 00:00:00.099000")
+
+ # check taosdemo's timestamp_step handling when the database precision is ms
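+ # taosdemoInsertMSDB.json uses an ms-precision database, so 99 * 1000 ms puts the last timestamp
+ # at 2021-07-01 00:01:39.000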
+ os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoInsertMSDB.json -y " % binPath)
+ tdSql.execute("use testdb3")
+ tdSql.query("show stables")
+ tdSql.checkData(0, 4, 100)
+ tdSql.query("select count (tbname) from stb0")
+ tdSql.checkData(0, 0, 100)
+ tdSql.query("select count(*) from tb0_0")
+ tdSql.checkData(0, 0, 100)
+ tdSql.query("select count(*) from stb0")
+ tdSql.checkData(0, 0, 10000)
+ tdSql.query("describe stb0")
+ tdSql.checkDataType(9, 1,"TIMESTAMP")
+ tdSql.query("select last(ts) from stb0")
+ tdSql.checkData(0, 0,"2021-07-01 00:01:39.000")
+
+
+ os.system("rm -rf ./res.txt")
+ os.system("rm -rf ./*.py.sql")
+
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py
index d9d6cc1082e9eac9ef3a900152bcbb8b77942c61..a0b669d5f12e9ba8e2052f82c2d6d8ac349bd017 100644
--- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py
+++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py
@@ -170,16 +170,17 @@ class TDTestCase:
# insert: let parament in json file is illegal, it'll expect error.
- tdSql.execute("drop database if exists db")
- os.system("%staosdemo -f tools/taosdemoAllTest/insertColumnsAndTagNumLarge1024.json -y " % binPath)
+ tdSql.execute("drop database if exists db")
+ os.system("%staosdemo -f tools/taosdemoAllTest/insertColumnsAndTagNumLarge4096.json -y " % binPath)
tdSql.error("use db")
- tdSql.execute("drop database if exists db")
- os.system("%staosdemo -f tools/taosdemoAllTest/insertSigcolumnsNum1024.json -y " % binPath)
+ tdSql.execute("drop database if exists db")
+ os.system("%staosdemo -f tools/taosdemoAllTest/insertSigcolumnsNum4096.json -y " % binPath)
tdSql.error("select * from db.stb0")
- tdSql.execute("drop database if exists db")
- os.system("%staosdemo -f tools/taosdemoAllTest/insertColumnsAndTagNum1024.json -y " % binPath)
+ tdSql.execute("drop database if exists db")
+ os.system("%staosdemo -f tools/taosdemoAllTest/insertColumnsAndTagNum4096.json -y " % binPath)
tdSql.query("select count(*) from db.stb0")
- tdSql.checkData(0, 0, 10000)
+ tdSql.checkData(0, 0, 10000)
+
tdSql.execute("drop database if exists db")
os.system("%staosdemo -f tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json -y " % binPath)
tdSql.query("select count(*) from db.stb0")
@@ -189,33 +190,35 @@ class TDTestCase:
tdSql.execute("use db")
tdSql.query("show stables like 'stb0%' ")
tdSql.checkData(0, 2, 11)
- tdSql.execute("drop database if exists db")
- os.system("%staosdemo -f tools/taosdemoAllTest/insertTagsNumLarge128.json -y " % binPath)
- tdSql.error("use db1")
- tdSql.execute("drop database if exists db")
- os.system("%staosdemo -f tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar16384.json -y " % binPath)
- tdSql.query("select count(*) from db.stb0")
+ tdSql.execute("drop database if exists db")
+ os.system("%staosdemo -f tools/taosdemoAllTest/insertTagsNumLarge128.json -y " % binPath)
+ tdSql.error("use db1")
+ tdSql.execute("drop database if exists db")
+ os.system("%staosdemo -f tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar49151.json -y " % binPath)
+ tdSql.query("select count(*) from db.stb0")
tdSql.checkRows(1)
tdSql.query("select count(*) from db.stb1")
tdSql.checkRows(1)
- tdSql.error("select * from db.stb3")
+ tdSql.error("select * from db.stb4")
tdSql.error("select * from db.stb2")
- tdSql.execute("drop database if exists db")
- os.system("%staosdemo -f tools/taosdemoAllTest/insertNumOfrecordPerReq0.json -y " % binPath)
- tdSql.error("select count(*) from db.stb0")
- tdSql.execute("drop database if exists db")
- os.system("%staosdemo -f tools/taosdemoAllTest/insertNumOfrecordPerReqless0.json -y " % binPath)
- tdSql.error("use db")
- tdSql.execute("drop database if exists db")
- os.system("%staosdemo -f tools/taosdemoAllTest/insertChildTab0.json -y " % binPath)
- tdSql.error("use db")
- tdSql.execute("drop database if exists db")
- os.system("%staosdemo -f tools/taosdemoAllTest/insertChildTabLess0.json -y " % binPath)
- tdSql.error("use db")
- tdSql.execute("drop database if exists blf")
- os.system("%staosdemo -f tools/taosdemoAllTest/insertTimestepMulRowsLargeint16.json -y " % binPath)
- tdSql.execute("use blf")
- tdSql.query("select ts from blf.p_0_topics_7 limit 262800,1")
+ tdSql.query("select count(*) from db.stb3")
+ tdSql.checkRows(1)
+ tdSql.execute("drop database if exists db")
+ os.system("%staosdemo -f tools/taosdemoAllTest/insertNumOfrecordPerReq0.json -y " % binPath)
+ tdSql.error("select count(*) from db.stb0")
+ tdSql.execute("drop database if exists db")
+ os.system("%staosdemo -f tools/taosdemoAllTest/insertNumOfrecordPerReqless0.json -y " % binPath)
+ tdSql.error("use db")
+ tdSql.execute("drop database if exists db")
+ os.system("%staosdemo -f tools/taosdemoAllTest/insertChildTab0.json -y " % binPath)
+ tdSql.error("use db")
+ tdSql.execute("drop database if exists db")
+ os.system("%staosdemo -f tools/taosdemoAllTest/insertChildTabLess0.json -y " % binPath)
+ tdSql.error("use db")
+ tdSql.execute("drop database if exists blf")
+ os.system("%staosdemo -f tools/taosdemoAllTest/insertTimestepMulRowsLargeint16.json -y " % binPath)
+ tdSql.execute("use blf")
+ tdSql.query("select ts from blf.p_0_topics_7 limit 262800,1")
tdSql.checkData(0, 0, "2020-03-31 12:00:00.000")
tdSql.query("select first(ts) from blf.p_0_topics_2")
tdSql.checkData(0, 0, "2019-10-01 00:00:00")
@@ -316,10 +319,12 @@ class TDTestCase:
tdSql.query('show tables like \'YYY%\'') #child_table_exists = yes, auto_create_table varies = yes
tdSql.checkRows(20)
+ testcaseFilename = os.path.split(__file__)[-1]
os.system("rm -rf ./insert_res.txt")
- os.system("rm -rf tools/taosdemoAllTest/taosdemoTestInsertWithJson.py.sql")
-
-
+ os.system("rm -rf tools/taosdemoAllTest/%s.sql" % testcaseFilename )
+
+
+
def stop(self):
tdSql.close()
diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJsonStmt.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJsonStmt.py
new file mode 100644
index 0000000000000000000000000000000000000000..0aade4318390b43d8781cdac3deff3f1d7623b10
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJsonStmt.py
@@ -0,0 +1,316 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ def getBuildPath(self):
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosd" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root)-len("/build/bin")]
+ break
+ return buildPath
+
+ def run(self):
+ buildPath = self.getBuildPath()
+ if (buildPath == ""):
+ tdLog.exit("taosd not found!")
+ else:
+ tdLog.info("taosd found in %s" % buildPath)
+ binPath = buildPath+ "/build/bin/"
+
+ # insert: create one or multiple tables per sql and insert multiple rows per sql
+ os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-1s1tnt1r-stmt.json -y " % binPath)
+ tdSql.execute("use db")
+ tdSql.query("select count (tbname) from stb0")
+ tdSql.checkData(0, 0, 1000)
+ tdSql.query("select count (tbname) from stb1")
+ tdSql.checkData(0, 0, 1000)
+ tdSql.query("select count(*) from stb00_0")
+ tdSql.checkData(0, 0, 100)
+ tdSql.query("select count(*) from stb0")
+ tdSql.checkData(0, 0, 100000)
+ tdSql.query("select count(*) from stb01_1")
+ tdSql.checkData(0, 0, 200)
+ tdSql.query("select count(*) from stb1")
+ tdSql.checkData(0, 0, 200000)
+
+
+ # insert: create multiple tables per sql and insert one row per sql.
+ os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-1s1tntmr-stmt.json -y " % binPath)
+ tdSql.execute("use db")
+ tdSql.query("select count (tbname) from stb0")
+ tdSql.checkData(0, 0, 10)
+ tdSql.query("select count (tbname) from stb1")
+ tdSql.checkData(0, 0, 20)
+ tdSql.query("select count(*) from stb00_0")
+ tdSql.checkData(0, 0, 10000)
+ tdSql.query("select count(*) from stb0")
+ tdSql.checkData(0, 0, 100000)
+ tdSql.query("select count(*) from stb01_0")
+ tdSql.checkData(0, 0, 20000)
+ tdSql.query("select count(*) from stb1")
+ tdSql.checkData(0, 0, 400000)
+
+ # insert: using parament "insert_interval to controls spped of insert.
+ # but We need to have accurate methods to control the speed, such as getting the speed value, checking the count and so on。
+ os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-interval-speed-stmt.json -y" % binPath)
+ tdSql.execute("use db")
+ tdSql.query("show stables")
+ tdSql.checkData(0, 4, 100)
+ tdSql.query("select count(*) from stb00_0")
+ tdSql.checkData(0, 0, 20000)
+ tdSql.query("select count(*) from stb0")
+ tdSql.checkData(0, 0, 2000000)
+ tdSql.query("show stables")
+ tdSql.checkData(1, 4, 100)
+ tdSql.query("select count(*) from stb01_0")
+ tdSql.checkData(0, 0, 20000)
+ tdSql.query("select count(*) from stb1")
+ tdSql.checkData(0, 0, 2000000)
+
+ # the following 3 test cases take about 2min30s.
+ # insert: drop and child_table_exists combination test
+ # insert: use the parameters "childtable_offset" and "childtable_limit" to control the table offset and limit
+ os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-nodbnodrop-stmt.json -y" % binPath)
+ tdSql.error("show dbno.stables")
+ os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-newdb-stmt.json -y" % binPath)
+ tdSql.execute("use db")
+ tdSql.query("select count (tbname) from stb0")
+ tdSql.checkData(0, 0, 5)
+ tdSql.query("select count (tbname) from stb1")
+ tdSql.checkData(0, 0, 6)
+ tdSql.query("select count (tbname) from stb2")
+ tdSql.checkData(0, 0, 7)
+ tdSql.query("select count (tbname) from stb3")
+ tdSql.checkData(0, 0, 8)
+ tdSql.query("select count (tbname) from stb4")
+ tdSql.checkData(0, 0, 8)
+ os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-offset-stmt.json -y" % binPath)
+ tdSql.execute("use db")
+ tdSql.query("select count(*) from stb0")
+ tdSql.checkData(0, 0, 50)
+ tdSql.query("select count(*) from stb1")
+ tdSql.checkData(0, 0, 240)
+ tdSql.query("select count(*) from stb2")
+ tdSql.checkData(0, 0, 220)
+ tdSql.query("select count(*) from stb3")
+ tdSql.checkData(0, 0, 180)
+ tdSql.query("select count(*) from stb4")
+ tdSql.checkData(0, 0, 160)
+ os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-newtable-stmt.json -y" % binPath)
+ tdSql.execute("use db")
+ tdSql.query("select count(*) from stb0")
+ tdSql.checkData(0, 0, 150)
+ tdSql.query("select count(*) from stb1")
+ tdSql.checkData(0, 0, 360)
+ tdSql.query("select count(*) from stb2")
+ tdSql.checkData(0, 0, 360)
+ tdSql.query("select count(*) from stb3")
+ tdSql.checkData(0, 0, 340)
+ tdSql.query("select count(*) from stb4")
+ tdSql.checkData(0, 0, 400)
+ os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-renewdb-stmt.json -y" % binPath)
+ tdSql.execute("use db")
+ tdSql.query("select count(*) from stb0")
+ tdSql.checkData(0, 0, 50)
+ tdSql.query("select count(*) from stb1")
+ tdSql.checkData(0, 0, 120)
+ tdSql.query("select count(*) from stb2")
+ tdSql.checkData(0, 0, 140)
+ tdSql.query("select count(*) from stb3")
+ tdSql.checkData(0, 0, 160)
+ tdSql.query("select count(*) from stb4")
+ tdSql.checkData(0, 0, 160)
+
+
+ # insert: when a parameter in the json file is illegal, an error is expected.
+ tdSql.execute("drop database if exists db")
+ os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insertColumnsAndTagNumLarge4096-stmt.json -y " % binPath)
+ tdSql.error("use db")
+ tdSql.execute("drop database if exists db")
+ os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insertSigcolumnsNum4096-stmt.json -y " % binPath)
+ tdSql.error("select * from db.stb0")
+ # tdSql.execute("drop database if exists db")
+ # os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insertColumnsAndTagNum4096-stmt.json -y " % binPath)
+ # tdSql.query("select count(*) from db.stb0")
+ # tdSql.checkData(0, 0, 10000)
+ tdSql.execute("drop database if exists db")
+ os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insertInterlaceRowsLarge1M-stmt.json -y " % binPath)
+ tdSql.query("select count(*) from db.stb0")
+ tdSql.checkRows(0)
+ tdSql.execute("drop database if exists db")
+ os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insertColumnsNum0-stmt.json -y " % binPath)
+ tdSql.execute("use db")
+ tdSql.query("show stables like 'stb0%' ")
+ tdSql.checkData(0, 2, 11)
+ tdSql.execute("drop database if exists db")
+ os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insertTagsNumLarge128-stmt.json -y " % binPath)
+ tdSql.error("use db1")
+ tdSql.execute("drop database if exists db")
+ os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insertBinaryLenLarge16374AllcolLar49151-stmt.json -y " % binPath)
+ tdSql.query("select count(*) from db.stb0")
+ tdSql.checkRows(1)
+ tdSql.query("select count(*) from db.stb1")
+ tdSql.checkRows(1)
+ tdSql.error("select * from db.stb4")
+ tdSql.error("select * from db.stb2")
+ tdSql.query("select count(*) from db.stb3")
+ tdSql.checkRows(1)
+ tdSql.execute("drop database if exists db")
+ os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insertNumOfrecordPerReq0-stmt.json -y " % binPath)
+ tdSql.error("select count(*) from db.stb0")
+ tdSql.execute("drop database if exists db")
+ os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insertNumOfrecordPerReqless0-stmt.json -y " % binPath)
+ tdSql.error("use db")
+ tdSql.execute("drop database if exists db")
+ os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insertChildTab0-stmt.json -y " % binPath)
+ tdSql.error("use db")
+ tdSql.execute("drop database if exists db")
+ os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insertChildTabLess0-stmt.json -y " % binPath)
+ tdSql.error("use db")
+ tdSql.execute("drop database if exists blf")
+ os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insertTimestepMulRowsLargeint16-stmt.json -y " % binPath)
+ tdSql.execute("use blf")
+ tdSql.query("select ts from blf.p_0_topics_7 limit 262800,1")
+ tdSql.checkData(0, 0, "2020-03-31 12:00:00.000")
+ tdSql.query("select first(ts) from blf.p_0_topics_2")
+ tdSql.checkData(0, 0, "2019-10-01 00:00:00")
+ tdSql.query("select last(ts) from blf.p_0_topics_6 ")
+ tdSql.checkData(0, 0, "2020-09-29 23:59:00")
+ os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insertMaxNumPerReq-stmt.json -y " % binPath)
+ tdSql.execute("use db")
+ tdSql.query("select count(*) from stb0")
+ tdSql.checkData(0, 0, 5000000)
+ tdSql.query("select count(*) from stb1")
+ tdSql.checkData(0, 0, 5000000)
+
+
+ # insert: timestamp and step
+ os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-timestep-stmt.json -y " % binPath)
+ tdSql.execute("use db")
+ tdSql.query("show stables")
+ tdSql.query("select count (tbname) from stb0")
+ tdSql.checkData(0, 0, 10)
+ tdSql.query("select count (tbname) from stb1")
+ tdSql.checkData(0, 0, 20)
+ tdSql.query("select last(ts) from db.stb00_0")
+ tdSql.checkData(0, 0, "2020-10-01 00:00:00.019000")
+ tdSql.query("select count(*) from stb0")
+ tdSql.checkData(0, 0, 200)
+ tdSql.query("select last(ts) from db.stb01_0")
+ tdSql.checkData(0, 0, "2020-11-01 00:00:00.190000")
+ tdSql.query("select count(*) from stb1")
+ tdSql.checkData(0, 0, 400)
+
+ # insert: disorder_ratio
+ os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-disorder-stmt.json 2>&1 -y " % binPath)
+ tdSql.execute("use db")
+ tdSql.query("select count (tbname) from stb0")
+ tdSql.checkData(0, 0, 1)
+ tdSql.query("select count (tbname) from stb1")
+ tdSql.checkData(0, 0, 1)
+ tdSql.query("select count(*) from stb0")
+ tdSql.checkData(0, 0, 10)
+ tdSql.query("select count(*) from stb1")
+ tdSql.checkData(0, 0, 10)
+
+ # insert: sample json
+ os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-sample-stmt.json -y " % binPath)
+ tdSql.execute("use dbtest123")
+ tdSql.query("select c2 from stb0")
+ tdSql.checkData(0, 0, 2147483647)
+ tdSql.query("select * from stb1 where t1=-127")
+ tdSql.checkRows(20)
+ tdSql.query("select * from stb1 where t2=127")
+ tdSql.checkRows(10)
+ tdSql.query("select * from stb1 where t2=126")
+ tdSql.checkRows(10)
+
+ # insert: test the interlace parameter
+ os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-interlace-row-stmt.json -y " % binPath)
+ tdSql.execute("use db")
+ tdSql.query("select count (tbname) from stb0")
+ tdSql.checkData(0, 0, 100)
+ tdSql.query("select count (*) from stb0")
+ tdSql.checkData(0, 0, 15000)
+
+
+ # insert: auto_create
+
+ tdSql.execute('drop database if exists db')
+ tdSql.execute('create database db')
+ tdSql.execute('use db')
+ os.system("%staosdemo -y -f tools/taosdemoAllTest/stmt/insert-drop-exist-auto-N00-stmt.json " % binPath) # drop = no, child_table_exists, auto_create_table varies
+ tdSql.execute('use db')
+ tdSql.query('show tables like \'NN123%\'') # child_table_exists = no, auto_create_table = 123
+ tdSql.checkRows(20)
+ tdSql.query('show tables like \'NNN%\'') # child_table_exists = no, auto_create_table = no
+ tdSql.checkRows(20)
+ tdSql.query('show tables like \'NNY%\'') # child_table_exists = no, auto_create_table = yes
+ tdSql.checkRows(20)
+ tdSql.query('show tables like \'NYN%\'') # child_table_exists = yes, auto_create_table = no
+ tdSql.checkRows(0)
+ tdSql.query('show tables like \'NY123%\'') # child_table_exists = yes, auto_create_table = 123
+ tdSql.checkRows(0)
+ tdSql.query('show tables like \'NYY%\'') # child_table_exists = yes, auto_create_table = yes
+ tdSql.checkRows(0)
+
+ tdSql.execute('drop database if exists db')
+ os.system("%staosdemo -y -f tools/taosdemoAllTest/stmt/insert-drop-exist-auto-Y00-stmt.json " % binPath) # drop = yes, child_table_exists, auto_create_table varies
+ tdSql.execute('use db')
+ tdSql.query('show tables like \'YN123%\'') # child_table_exists = no, auto_create_table = 123
+ tdSql.checkRows(20)
+ tdSql.query('show tables like \'YNN%\'') # child_table_exists = no, auto_create_table = no
+ tdSql.checkRows(20)
+ tdSql.query('show tables like \'YNY%\'') # child_table_exists = no, auto_create_table = yes
+ tdSql.checkRows(20)
+ tdSql.query('show tables like \'YYN%\'') # child_table_exists = yes, auto_create_table = no
+ tdSql.checkRows(20)
+ tdSql.query('show tables like \'YY123%\'') # child_table_exists = yes, auto_create_table = 123
+ tdSql.checkRows(20)
+ tdSql.query('show tables like \'YYY%\'') # child_table_exists = yes, auto_create_table = yes
+ tdSql.checkRows(20)
+
+ testcaseFilename = os.path.split(__file__)[-1]
+ os.system("rm -rf ./insert_res.txt")
+ os.system("rm -rf tools/taosdemoAllTest/%s.sql" % testcaseFilename )
+
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabase.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabase.json
new file mode 100644
index 0000000000000000000000000000000000000000..246f1c35f29973fc20602284b37ae68de23f70c1
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabase.json
@@ -0,0 +1,88 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 10,
+ "thread_count_create_tbl": 10,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 100,
+ "num_of_records_per_req": 1000,
+ "max_sql_len": 1024000,
+ "databases": [{
+ "dbinfo": {
+ "name": "nsdb",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 50,
+ "blocks": 8,
+ "precision": "ns",
+ "keep": 36,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"no",
+ "childtable_count": 100,
+ "childtable_prefix": "tb0_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 20,
+ "data_source": "rand",
+ "insert_mode": "taosc",
+ "insert_rows": 100,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 10000000,
+ "start_timestamp": "2021-07-01 00:00:00.000",
+ "sample_format": "",
+ "sample_file": "",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2},
+ {"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1},
+ {"type": "BOOL"},{"type": "NCHAR","len":16}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}]
+ },
+ {
+ "name": "stb1",
+ "child_table_exists":"no",
+ "childtable_count": 100,
+ "childtable_prefix": "tb1_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 20,
+ "data_source": "rand",
+ "insert_mode": "taosc",
+ "insert_rows": 100,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 10,
+ "disorder_range": 1000,
+ "timestamp_step": 10000000,
+ "start_timestamp": "2021-07-01 00:00:00.000",
+ "sample_format": "",
+ "sample_file": "",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2},
+ {"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1},
+ {"type": "BOOL"},{"type": "NCHAR","len":16}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}]
+ }]
+ }]
+}
diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseInsertForSub.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseInsertForSub.json
new file mode 100644
index 0000000000000000000000000000000000000000..0726f3905de2b254b49be51a7973d34b5eb6757e
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseInsertForSub.json
@@ -0,0 +1,84 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 10,
+ "thread_count_create_tbl": 10,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 100,
+ "num_of_records_per_req": 1000,
+ "max_sql_len": 1024000,
+ "databases": [{
+ "dbinfo": {
+ "name": "subnsdb",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 50,
+ "blocks": 8,
+ "precision": "ns",
+ "keep": 36,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"no",
+ "childtable_count": 10,
+ "childtable_prefix": "tb0_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 20,
+ "data_source": "samples",
+ "insert_mode": "taosc",
+ "insert_rows": 10,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 10000000,
+ "start_timestamp": "2021-07-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./tools/taosdemoAllTest/nano_samples.csv",
+ "tags_file": "./tools/taosdemoAllTest/nano_sampletags.csv",
+ "columns": [{"type": "DOUBLE"}, {"type": "BINARY", "len": 64, "count":1}, {"type": "TIMESTAMP", "count":1}],
+ "tags": [{"type": "BINARY", "len": 16, "count":1},{"type": "INT"},{"type": "DOUBLE"}]
+ },
+ {
+ "name": "stb1",
+ "child_table_exists":"no",
+ "childtable_count": 10,
+ "childtable_prefix": "tb1_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 20,
+ "data_source": "samples",
+ "insert_mode": "taosc",
+ "insert_rows": 10,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 10,
+ "disorder_range": 1000,
+ "timestamp_step": 10000000,
+ "start_timestamp": "2021-07-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./tools/taosdemoAllTest/nano_samples.csv",
+ "tags_file": "./tools/taosdemoAllTest/nano_sampletags.csv",
+ "columns": [{"type": "DOUBLE"}, {"type": "BINARY", "len": 64, "count":1}, {"type": "TIMESTAMP", "count":1}],
+ "tags": [{"type": "BINARY", "len": 16, "count":1},{"type": "INT"},{"type": "DOUBLE"}]
+ }]
+ }]
+}
diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json
new file mode 100644
index 0000000000000000000000000000000000000000..f36b1f9b4c1b83707b9482428d4303a5418ad2c3
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json
@@ -0,0 +1,62 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 10,
+ "thread_count_create_tbl": 10,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 100,
+ "num_of_records_per_req": 1000,
+ "max_sql_len": 1024000,
+ "databases": [{
+ "dbinfo": {
+ "name": "nsdb2",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 50,
+ "blocks": 8,
+ "precision": "ns",
+ "keep": 36,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"no",
+ "childtable_count": 100,
+ "childtable_prefix": "tb0_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 20,
+ "data_source": "rand",
+ "insert_mode": "taosc",
+ "insert_rows": 100,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 10,
+ "start_timestamp": "now",
+ "sample_format": "",
+ "sample_file": "",
+ "tags_file": "",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2},
+ {"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1},
+ {"type": "BOOL"},{"type": "NCHAR","len":16}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}]
+ }]
+ }]
+}
diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json
new file mode 100644
index 0000000000000000000000000000000000000000..867619ed8c1497e76077f96d257dd09a489d9eb7
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json
@@ -0,0 +1,84 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 10,
+ "thread_count_create_tbl": 10,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 100,
+ "num_of_records_per_req": 1000,
+ "max_sql_len": 1024000,
+ "databases": [{
+ "dbinfo": {
+ "name": "nsdbcsv",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 50,
+ "blocks": 8,
+ "precision": "ns",
+ "keep": 36,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"no",
+ "childtable_count": 100,
+ "childtable_prefix": "tb0_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 20,
+ "data_source": "samples",
+ "insert_mode": "taosc",
+ "insert_rows": 100,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 10000000,
+ "start_timestamp": "2021-07-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./tools/taosdemoAllTest/nano_samples.csv",
+ "tags_file": "./tools/taosdemoAllTest/nano_sampletags.csv",
+ "columns": [{"type": "DOUBLE"}, {"type": "BINARY", "len": 64, "count":1}, {"type": "TIMESTAMP", "count":1}],
+ "tags": [{"type": "BINARY", "len": 16, "count":1},{"type": "INT"},{"type": "DOUBLE"}]
+ },
+ {
+ "name": "stb1",
+ "child_table_exists":"no",
+ "childtable_count": 100,
+ "childtable_prefix": "tb1_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 20,
+ "data_source": "samples",
+ "insert_mode": "taosc",
+ "insert_rows": 100,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 10,
+ "disorder_range": 1000,
+ "timestamp_step": 10000000,
+ "start_timestamp": "2021-07-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./tools/taosdemoAllTest/nano_samples.csv",
+ "tags_file": "./tools/taosdemoAllTest/nano_sampletags.csv",
+ "columns": [{"type": "DOUBLE"}, {"type": "BINARY", "len": 64, "count":1}, {"type": "TIMESTAMP", "count":1}],
+ "tags": [{"type": "BINARY", "len": 16, "count":1},{"type": "INT"},{"type": "DOUBLE"}]
+ }]
+ }]
+}
diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py
new file mode 100644
index 0000000000000000000000000000000000000000..266a8fa712cc3f305eb1ef4a7e358fbfe3f43bb0
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py
@@ -0,0 +1,156 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ def getBuildPath(self):
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosd" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root)-len("/build/bin")]
+ break
+ return buildPath
+
+ def run(self):
+ buildPath = self.getBuildPath()
+ if (buildPath == ""):
+ tdLog.exit("taosd not found!")
+ else:
+ tdLog.info("taosd found in %s" % buildPath)
+ binPath = buildPath+ "/build/bin/"
+
+
+ # insert: create one or multiple tables per sql and insert multiple rows per sql
+ # insert data starting from a specific timestamp
+ # check stable stb0
+
+ os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabase.json -y " % binPath)
+ tdSql.execute("use nsdb")
+ tdSql.query("show stables")
+ tdSql.checkData(0, 4, 100)
+ tdSql.query("select count (tbname) from stb0")
+ tdSql.checkData(0, 0, 100)
+ tdSql.query("select count(*) from tb0_0")
+ tdSql.checkData(0, 0, 100)
+ tdSql.query("select count(*) from stb0")
+ tdSql.checkData(0, 0, 10000)
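+ # row 9 of "describe stb0" should be the extra TIMESTAMP column (c8) defined in the json config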
+ tdSql.query("describe stb0")
+ tdSql.checkDataType(9, 1,"TIMESTAMP")
+ tdSql.query("select last(ts) from stb0")
+ tdSql.checkData(0, 0,"2021-07-01 00:00:00.990000000")
+
+ # check stable stb1, which is inserted with disorder
+
+ tdSql.query("select count (tbname) from stb1")
+ tdSql.checkData(0, 0, 100)
+ tdSql.query("select count(*) from tb1_0")
+ tdSql.checkData(0, 0, 100)
+ tdSql.query("select count(*) from stb1")
+ tdSql.checkData(0, 0, 10000)
+ # check that c8 is a nano-precision timestamp
+ tdSql.query("describe stb1")
+ tdSql.checkDataType(9, 1,"TIMESTAMP")
+ # check that the insert timestamp_step is in nanoseconds
+ tdSql.query("select last(ts) from stb1")
+ tdSql.checkData(0, 0,"2021-07-01 00:00:00.990000000")
+
+ # insert data starting from the current time
+
+ # check stable stb0
+ os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json -y " % binPath)
+
+ tdSql.execute("use nsdb2")
+ tdSql.query("show stables")
+ tdSql.checkData(0, 4, 100)
+ tdSql.query("select count (tbname) from stb0")
+ tdSql.checkData(0, 0, 100)
+ tdSql.query("select count(*) from tb0_0")
+ tdSql.checkData(0, 0, 100)
+ tdSql.query("select count(*) from stb0")
+ tdSql.checkData(0, 0, 10000)
+ # check that c8 is a nano-precision timestamp
+ tdSql.query("describe stb0")
+ tdSql.checkDataType(9,1,"TIMESTAMP")
+
+ # insert from csv files where the timestamp is a long int, with strings in ts and cols
+
+ os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json -y " % binPath)
+ tdSql.execute("use nsdbcsv")
+ tdSql.query("show stables")
+ tdSql.checkData(0, 4, 100)
+ tdSql.query("select count(*) from stb0")
+ tdSql.checkData(0, 0, 10000)
+ tdSql.query("describe stb0")
+ tdSql.checkDataType(3, 1, "TIMESTAMP")
+ tdSql.query("select count(*) from stb0 where ts > \"2021-07-01 00:00:00.490000000\"")
+ tdSql.checkData(0, 0, 5000)
+ tdSql.query("select count(*) from stb0 where ts < 1626918583000000000")
+ tdSql.checkData(0, 0, 10000)
+
+ os.system("rm -rf ./insert_res.txt")
+ os.system("rm -rf tools/taosdemoAllTest/taosdemoTestSupportNano*.py.sql")
+
+ # taosdemo insert test driven by command-line parameters; see taosdemo --help for details
+ os.system("%staosdemo -u root -P taosdata -p 6030 -a 1 -m pre -n 10 -T 20 -t 60 -o res.txt -y " % binPath)
+ tdSql.query("select count(*) from test.meters")
+ tdSql.checkData(0, 0, 600)
+ # check taosdemo -s
+
+ sqls_ls = ['drop database if exists nsdbsql;','create database nsdbsql precision "ns" keep 36 days 6 update 1;',
+ 'use nsdbsql;','CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);',
+ 'CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2);',
+ 'INSERT INTO d1001 USING meters TAGS ("Beijing.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32);',
+ 'INSERT INTO d1001 USING meters TAGS ("Beijing.Chaoyang", 2) VALUES (now, 85, 32, 0.76);']
+
+ with open("./taosdemoTestNanoCreateDB.sql",mode ="a" ) as sql_files:
+ for sql in sqls_ls:
+ sql_files.write(sql+"\n")
+ sql_files.close()
+
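+ # presumably gives the server time to settle before replaying the generated SQL file with taosdemo -s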
+ sleep(10)
+
+ os.system("%staosdemo -s taosdemoTestNanoCreateDB.sql -y " % binPath)
+ tdSql.query("select count(*) from nsdbsql.meters")
+ tdSql.checkData(0, 0, 2)
+
+ os.system("rm -rf ./res.txt")
+ os.system("rm -rf ./*.py.sql")
+ os.system("rm -rf ./taosdemoTestNanoCreateDB.sql")
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.json
new file mode 100644
index 0000000000000000000000000000000000000000..fff1017588bb10f55a82aa2bd7bc6997df71abfd
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.json
@@ -0,0 +1,92 @@
+{
+ "filetype": "query",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "confirm_parameter_prompt": "no",
+ "databases": "nsdb",
+ "query_times": 10,
+ "query_mode": "taosc",
+ "specified_table_query": {
+ "query_interval": 1,
+ "concurrent": 2,
+ "sqls": [
+ {
+ "sql": "select count(*) from stb0 where ts>\"2021-07-01 00:01:00.000000000 \" ;",
+ "result": "./query_res0.txt"
+ },
+ {
+ "sql": "select count(*) from stb0 where ts>\"2021-07-01 00:01:00.000000000\" and ts <=\"2021-07-01 00:01:10.000000000\" ;",
+ "result": "./query_res1.txt"
+ },
+ {
+ "sql": "select count(*) from stb0 where ts>now-20d ;",
+ "result": "./query_res2.txt"
+ },
+ {
+ "sql": "select max(c10) from stb0;",
+ "result": "./query_res3.txt"
+ },
+ {
+ "sql": "select min(c1) from stb0;",
+ "result": "./query_res4.txt"
+ },
+ {
+ "sql": "select avg(c1) from stb0;",
+ "result": "./query_res5.txt"
+ },
+ {
+ "sql":"select count(*) from stb0 group by tbname;",
+ "result":"./query_res6.txt"
+ }
+
+ ]
+ },
+ "super_table_query": {
+ "stblname": "stb0",
+ "query_interval": 0,
+ "threads": 4,
+ "sqls": [
+ {
+ "sql": "select count(*) from xxxx where ts>\"2021-07-01 00:01:00.000000000 \" ;",
+ "result": "./query_res_tb0.txt"
+ },
+ {
+ "sql":"select count(*) from xxxx where ts>\"2021-07-01 00:01:00.000000000\" and ts <=\"2021-07-01 00:01:10.000000000\" ;",
+ "result": "./query_res_tb1.txt"
+ },
+ {
+ "sql":"select first(*) from xxxx ;",
+ "result": "./query_res_tb2.txt"
+ },
+ {
+ "sql":"select last(*) from xxxx;",
+ "result": "./query_res_tb3.txt"
+
+ },
+ {
+ "sql":"select last_row(*) from xxxx ;",
+ "result": "./query_res_tb4.txt"
+
+ },
+ {
+ "sql":"select max(c10) from xxxx ;",
+ "result": "./query_res_tb5.txt"
+
+ },
+ {
+ "sql":"select min(c1) from xxxx ;",
+ "result": "./query_res_tb6.txt"
+
+ },
+ {
+ "sql":"select avg(c10) from xxxx ;",
+ "result": "./query_res_tb7.txt"
+
+ }
+
+ ]
+ }
+ }
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py
new file mode 100644
index 0000000000000000000000000000000000000000..5a37cf9c7cf3153a7bcabb0bc9258063e5f05f09
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py
@@ -0,0 +1,157 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ def getBuildPath(self):
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosd" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root)-len("/build/bin")]
+ break
+ return buildPath
+
+ def run(self):
+ buildPath = self.getBuildPath()
+ if (buildPath == ""):
+ tdLog.exit("taosd not found!")
+ else:
+ tdLog.info("taosd found in %s" % buildPath)
+ binPath = buildPath+ "/build/bin/"
+
+ # query: nanosecond query test with where filters, max/min/avg and group by
+ os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabase.json -y " % binPath)
+
+ tdSql.execute("use nsdb")
+
+ # use where to filter
+
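+ # rows are written every 10ms starting at 2021-07-01 00:00:00.000, so each child table covers .000 through .990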
+ tdSql.query("select count(*) from stb0 where ts>\"2021-07-01 00:00:00.590000000 \" ")
+ tdSql.checkData(0, 0, 4000)
+ tdSql.query("select count(*) from stb0 where ts>\"2021-07-01 00:00:00.000000000\" and ts <=\"2021-07-01 00:00:00.590000000\" ")
+ tdSql.checkData(0, 0, 5900)
+
+ tdSql.query("select count(*) from tb0_0 where ts>\"2021-07-01 00:00:00.590000000 \" ;")
+ tdSql.checkData(0, 0, 40)
+ tdSql.query("select count(*) from tb0_0 where ts>\"2021-07-01 00:00:00.000000000\" and ts <=\"2021-07-01 00:00:00.590000000\" ")
+ tdSql.checkData(0, 0, 59)
+
+
+ # select max, min and avg from specific columns
+ tdSql.query("select max(c10) from stb0;")
+ print("select max(c10) from stb0 : " , tdSql.getData(0, 0))
+
+ tdSql.query("select max(c10) from tb0_0;")
+ print("select max(c10) from tb0_0 : " , tdSql.getData(0, 0))
+
+
+ tdSql.query("select min(c1) from stb0;")
+ print( "select min(c1) from stb0 : " , tdSql.getData(0, 0))
+
+ tdSql.query("select min(c1) from tb0_0;")
+ print( "select min(c1) from tb0_0 : " , tdSql.getData(0, 0))
+
+ tdSql.query("select avg(c1) from stb0;")
+ print( "select avg(c1) from stb0 : " , tdSql.getData(0, 0))
+
+ tdSql.query("select avg(c1) from tb0_0;")
+ print( "select avg(c1) from tb0_0 : " , tdSql.getData(0, 0))
+
+ tdSql.query("select count(*) from stb0 group by tbname;")
+ tdSql.checkData(0, 0, 100)
+ tdSql.checkData(10, 0, 100)
+
+ # query: run the above sqls continuously via taosdemo
+
+ os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.json -y " % binPath)
+
+
+ os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json -y " % binPath)
+ tdSql.execute("use nsdbcsv")
+ tdSql.query("show stables")
+ tdSql.checkData(0, 4, 100)
+ tdSql.query("select count(*) from stb0")
+ tdSql.checkData(0, 0, 10000)
+ tdSql.query("describe stb0")
+ tdSql.checkDataType(3, 1, "TIMESTAMP")
+ tdSql.query("select count(*) from stb0 where ts >\"2021-07-01 00:00:00.490000000\"")
+ tdSql.checkData(0, 0, 5000)
+ tdSql.query("select count(*) from stb0 where ts 162687012800000000')
+ tdSql.execute('select count(*) from stb0 where c2 < 162687012800000000')
+ tdSql.execute('select count(*) from stb0 where c2 = 162687012800000000')
+ tdSql.execute('select count(*) from stb0 where c2 != 162687012800000000')
+ tdSql.execute('select count(*) from stb0 where c2 <> 162687012800000000')
+ tdSql.execute('select count(*) from stb0 where c2 > "2021-07-21 20:22:08.248246976"')
+ tdSql.execute('select count(*) from stb0 where c2 < "2021-07-21 20:22:08.248246976"')
+ tdSql.execute('select count(*) from stb0 where c2 = "2021-07-21 20:22:08.248246976"')
+ tdSql.execute('select count(*) from stb0 where c2 != "2021-07-21 20:22:08.248246976"')
+ tdSql.execute('select count(*) from stb0 where c2 <> "2021-07-21 20:22:08.248246976"')
+ tdSql.execute('select count(*) from stb0 where ts between "2021-07-01 00:00:00.000000000" and "2021-07-01 00:00:00.990000000"')
+ tdSql.execute('select count(*) from stb0 where ts between 1625068800000000000 and 1625068801000000000')
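+ # the data spans less than 1s per table, so a 5s window yields a single row while a 100ms window yields 10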
+ tdSql.query('select avg(c0) from stb0 interval(5000000000b)')
+ tdSql.checkRows(1)
+
+ tdSql.query('select avg(c0) from stb0 interval(100000000b)')
+ tdSql.checkRows(10)
+
+ tdSql.error('select avg(c0) from stb0 interval(1b)')
+ tdSql.error('select avg(c0) from stb0 interval(999b)')
+
+ tdSql.query('select avg(c0) from stb0 interval(1000b)')
+ tdSql.checkRows(100)
+
+ tdSql.query('select avg(c0) from stb0 interval(1u)')
+ tdSql.checkRows(100)
+
+ tdSql.query('select avg(c0) from stb0 interval(100000000b) sliding (100000000b)')
+ tdSql.checkRows(10)
+
+ # query: run the above sqls continuously via taosdemo
+ os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestSupportNanoQuerycsv.json -y " % binPath)
+
+ os.system("rm -rf ./query_res*.txt*")
+ os.system("rm -rf tools/taosdemoAllTest/*.py.sql")
+
+
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuerycsv.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuerycsv.json
new file mode 100644
index 0000000000000000000000000000000000000000..a3b3c75efa6680aa0d1da0ca7986d863408ee515
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoQuerycsv.json
@@ -0,0 +1,110 @@
+{
+ "filetype": "query",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "confirm_parameter_prompt": "no",
+ "databases": "nsdbcsv",
+ "query_times": 10,
+ "query_mode": "taosc",
+ "specified_table_query": {
+ "query_interval": 1,
+ "concurrent": 2,
+ "sqls": [
+ {
+ "sql": "select count(*) from stb0 where ts> \"2021-07-01 00:00:00.490000000\" ;",
+ "result": "./query_res0.txt"
+ },
+ {
+ "sql": "select count(*) from stb0 where ts < now -22d-1h-3s ;",
+ "result": "./query_res1.txt"
+ },
+ {
+ "sql": "select count(*) from stb0 where ts < 1626918583000000000 ;",
+ "result": "./query_res2.txt"
+ },
+ {
+ "sql": "select count(*) from stb0 where c2 <> 162687012800000000';",
+ "result": "./query_res3.txt"
+ },
+ {
+ "sql": "select count(*) from stb0 where c2 != \"2021-07-21 20:22:08.248246976\";",
+ "result": "./query_res4.txt"
+ },
+ {
+ "sql": "select count(*) from stb0 where ts between \"2021-07-01 00:00:00.000000000\" and \"2021-07-01 00:00:00.990000000\";",
+ "result": "./query_res5.txt"
+ },
+ {
+ "sql":"select count(*) from stb0 group by tbname;",
+ "result":"./query_res6.txt"
+ },
+ {
+ "sql":"select count(*) from stb0 where ts between 1625068800000000000 and 1625068801000000000;",
+ "result":"./query_res7.txt"
+ },
+ {
+ "sql":"select avg(c0) from stb0 interval(5000000000b);",
+ "result":"./query_res8.txt"
+ },
+ {
+ "sql":"select avg(c0) from stb0 interval(100000000b) sliding (100000000b);",
+ "result":"./query_res9.txt"
+ }
+
+ ]
+ },
+ "super_table_query": {
+ "stblname": "stb0",
+ "query_interval": 0,
+ "threads": 4,
+ "sqls": [
+ {
+ "sql": "select count(*) from xxxx where ts > \"2021-07-01 00:00:00.490000000\" ;",
+ "result": "./query_res_tb0.txt"
+ },
+ {
+ "sql":"select count(*) from xxxx where ts between \"2021-07-01 00:00:00.000000000\" and \"2021-07-01 00:00:00.990000000\" ;",
+ "result": "./query_res_tb1.txt"
+ },
+ {
+ "sql":"select first(*) from xxxx ;",
+ "result": "./query_res_tb2.txt"
+ },
+ {
+ "sql":"select last(*) from xxxx;",
+ "result": "./query_res_tb3.txt"
+
+ },
+ {
+ "sql":"select last_row(*) from xxxx ;",
+ "result": "./query_res_tb4.txt"
+
+ },
+ {
+ "sql":"select max(c0) from xxxx ;",
+ "result": "./query_res_tb5.txt"
+
+ },
+ {
+ "sql":"select min(c0) from xxxx ;",
+ "result": "./query_res_tb6.txt"
+
+ },
+ {
+ "sql":"select avg(c0) from xxxx ;",
+ "result": "./query_res_tb7.txt"
+
+ },
+ {
+ "sql":"select avg(c0) from xxxx interval(100000000b) sliding (100000000b) ;",
+ "result": "./query_res_tb8.txt"
+
+ }
+
+
+ ]
+ }
+ }
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json
new file mode 100644
index 0000000000000000000000000000000000000000..1cc834164e7c966a9ce565f1ce481d823b1ed2d1
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json
@@ -0,0 +1,32 @@
+{
+ "filetype":"subscribe",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "databases": "subnsdb",
+ "confirm_parameter_prompt": "no",
+ "specified_table_query":
+ {
+ "concurrent":2,
+ "mode":"sync",
+ "interval":10000,
+ "restart":"yes",
+ "keepProgress":"yes",
+ "sqls": [
+ {
+ "sql": "select * from stb0 where ts>= \"2021-07-01 00:00:00.000000000\" ;",
+ "result": "./subscribe_res0.txt"
+ },
+ {
+ "sql": "select * from stb0 where ts < now -2d-1h-3s ;",
+ "result": "./subscribe_res1.txt"
+ },
+ {
+ "sql": "select * from stb0 where ts < 1626918583000000000 ;",
+ "result": "./subscribe_res2.txt"
+ }]
+
+ }
+}
diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py
new file mode 100644
index 0000000000000000000000000000000000000000..6dcea6e7e0418a19c5499befc86477fe9e2e3c62
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py
@@ -0,0 +1,125 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+import time
+from datetime import datetime
+import subprocess
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ def getBuildPath(self):
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosd" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root)-len("/build/bin")]
+ break
+ return buildPath
+
+ # count the rows written to a subscription result file
+ def subTimes(self,filename):
+ self.filename = filename
+ command = 'cat %s |wc -l'% filename
+ times = int(subprocess.getstatusoutput(command)[1])
+ return times
+
+ # assert results
+ def assertCheck(self,filename,subResult,expectResult):
+ self.filename = filename
+ self.subResult = subResult
+ self.expectResult = expectResult
+ args0 = (filename, subResult, expectResult)
+ assert subResult == expectResult , "Queryfile:%s ,result is %s != expect: %s" % args0
+
+ def run(self):
+ buildPath = self.getBuildPath()
+ if (buildPath == ""):
+ tdLog.exit("taosd not found!")
+ else:
+ tdLog.info("taosd found in %s" % buildPath)
+ binPath = buildPath+ "/build/bin/"
+
+ # clear env
+ os.system("ps -ef |grep 'taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json' |grep -v 'grep' |awk '{print $2}'|xargs kill -9")
+ os.system("rm -rf ./subscribe_res*")
+ os.system("rm -rf ./all_subscribe_res*")
+
+
+ # insert data
+ os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabaseInsertForSub.json" % binPath)
+ os.system("nohup %staosdemo -f tools/taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json &" % binPath)
+ query_pid = int(subprocess.getstatusoutput('ps aux|grep "taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json" |grep -v "grep"|awk \'{print $2}\'')[1])
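+ # keep the pid of the background subscriber so it can be killed during cleanup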
+
+
+ # merge result files
+ sleep(10)
+ os.system("cat subscribe_res0.txt* > all_subscribe_res0.txt")
+ os.system("cat subscribe_res1.txt* > all_subscribe_res1.txt")
+ os.system("cat subscribe_res2.txt* > all_subscribe_res2.txt")
+
+
+ # check the expected subscription row counts
+ subTimes0 = self.subTimes("all_subscribe_res0.txt")
+ self.assertCheck("all_subscribe_res0.txt",subTimes0 ,200)
+
+ subTimes1 = self.subTimes("all_subscribe_res1.txt")
+ self.assertCheck("all_subscribe_res1.txt",subTimes1 ,200)
+
+ subTimes2 = self.subTimes("all_subscribe_res2.txt")
+ self.assertCheck("all_subscribe_res2.txt",subTimes2 ,200)
+
+
+ # insert extra data
+ tdSql.execute("use subnsdb")
+ tdSql.execute("insert into tb0_0 values(now,100.1000,'subtest1',now-1s)")
+ sleep(1)
+
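+ # the new row should be picked up by both concurrent subscribers, so the merged count grows from 200 to 202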
+ os.system("cat subscribe_res0.txt* > all_subscribe_res0.txt")
+ subTimes0 = self.subTimes("all_subscribe_res0.txt")
+ print("pass")
+ self.assertCheck("all_subscribe_res0.txt",subTimes0 ,202)
+
+
+
+ # stop the background subscriber and clean up
+ os.system("kill -9 %d" % query_pid)
+ sleep(3)
+ os.system("rm -rf ./subscribe_res*")
+ os.system("rm -rf ./all_subscribe*")
+ os.system("rm -rf ./*.py.sql")
+
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/tools/taosdemoTestTblAlt.py b/tests/pytest/tools/taosdemoTestTblAlt.py
index 56c535916a51046e65b2ddd9813141ddb8848bd1..b70525ae4d87465a59ad524067d8b1e4a61d526a 100644
--- a/tests/pytest/tools/taosdemoTestTblAlt.py
+++ b/tests/pytest/tools/taosdemoTestTblAlt.py
@@ -98,8 +98,8 @@ class TDTestCase:
break
time.sleep(1)
- print("alter table test.meters add column col10 int")
- tdSql.execute("alter table test.meters add column col10 int")
+ print("alter table test.meters add column c10 int")
+ tdSql.execute("alter table test.meters add column c10 int")
print("insert into test.t9 values (now, 1, 2, 3, 4, 0)")
tdSql.execute("insert into test.t9 values (now, 1, 2, 3, 4, 0)")
diff --git a/tests/pytest/tools/taosdumpTestNanoSupport.py b/tests/pytest/tools/taosdumpTestNanoSupport.py
new file mode 100644
index 0000000000000000000000000000000000000000..ca8832170b7706621f5ef9d3225fe2cf16141c34
--- /dev/null
+++ b/tests/pytest/tools/taosdumpTestNanoSupport.py
@@ -0,0 +1,362 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ self.ts = 1625068800000000000 # this is timestamp "2021-07-01 00:00:00"
+ self.numberOfTables = 10
+ self.numberOfRecords = 100
+
+ def checkCommunity(self):
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+ if ("community" in selfPath):
+ return False
+ else:
+ return True
+
+ def getBuildPath(self):
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosdump" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root) - len("/build/bin")]
+ break
+ return buildPath
+
+
+
+ def createdb(self, precision="ns"):
+ tb_nums = self.numberOfTables
+ per_tb_rows = self.numberOfRecords
+
+ def build_db(precision, start_time):
+ tdSql.execute("drop database if exists timedb1")
+ tdSql.execute(
+ "create database timedb1 days 10 keep 365 blocks 8 precision "+"\""+precision+"\"")
+
+ tdSql.execute("use timedb1")
+ tdSql.execute(
+ "create stable st(ts timestamp, c1 int, c2 nchar(10),c3 timestamp) tags(t1 int, t2 binary(10))")
+ for tb in range(tb_nums):
+ tbname = "t"+str(tb)
+ tdSql.execute("create table " + tbname +
+ " using st tags(1, 'beijing')")
+ sql = "insert into " + tbname + " values"
+ currts = start_time
+ if precision == "ns":
+ ts_seed = 1000000000
+ elif precision == "us":
+ ts_seed = 1000000
+ else:
+ ts_seed = 1000
+
+ for i in range(per_tb_rows):
+ sql += "(%d, %d, 'nchar%d',%d)" % (currts + i*ts_seed, i %
+ 100, i % 100, currts + i*100) # currts +1000ms (1000000000ns)
+ tdSql.execute(sql)
+
+ if precision == "ns":
+ start_time = 1625068800000000000
+ build_db(precision, start_time)
+
+ elif precision == "us":
+ start_time = 1625068800000000
+ build_db(precision, start_time)
+
+ elif precision == "ms":
+ start_time = 1625068800000
+ build_db(precision, start_time)
+
+ else:
+ print("other time precision not valid , please check! ")
+
+
+ def run(self):
+
+ # clear envs
+ os.system("rm -rf ./taosdumptest/")
+ tdSql.execute("drop database if exists dumptmp1")
+ tdSql.execute("drop database if exists dumptmp2")
+ tdSql.execute("drop database if exists dumptmp3")
+
+ if not os.path.exists("./taosdumptest/tmp1"):
+ os.makedirs("./taosdumptest/dumptmp1")
+ else:
+ print("path exist!")
+
+ if not os.path.exists("./taosdumptest/dumptmp2"):
+ os.makedirs("./taosdumptest/dumptmp2")
+
+ if not os.path.exists("./taosdumptest/dumptmp3"):
+ os.makedirs("./taosdumptest/dumptmp3")
+
+ buildPath = self.getBuildPath()
+ if (buildPath == ""):
+ tdLog.exit("taosdump not found!")
+ else:
+ tdLog.info("taosdump found in %s" % buildPath)
+ binPath = buildPath + "/build/bin/"
+
+ # create nano second database
+
+ self.createdb(precision="ns")
+
+ # dump all data
+
+ os.system(
+ "%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % binPath)
+
+ # dump partial data with -S and -E
+ os.system(
+ '%staosdump --databases timedb1 -S 1625068810000000000 -E 1625068860000000000 -C ns -o ./taosdumptest/dumptmp2 ' %
+ binPath)
+ os.system(
+ '%staosdump --databases timedb1 -S 1625068810000000000 -o ./taosdumptest/dumptmp3 ' %
+ binPath)
+
+ # rename the database inside each dump so it restores into its own database
+ os.system(
+ "sed -i \"s/timedb1/dumptmp1/g\" `grep timedb1 -rl ./taosdumptest/dumptmp1`")
+ os.system(
+ "sed -i \"s/timedb1/dumptmp2/g\" `grep timedb1 -rl ./taosdumptest/dumptmp2`")
+ os.system(
+ "sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`")
+
+ os.system( "%staosdump -i ./taosdumptest/dumptmp1" %binPath)
+ os.system( "%staosdump -i ./taosdumptest/dumptmp2" %binPath)
+ os.system( "%staosdump -i ./taosdumptest/dumptmp3" %binPath)
+
+ # check the row counts of the restored databases
+ tdSql.query("select count(*) from dumptmp1.st")
+ tdSql.checkData(0,0,1000)
+
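+ # the -S/-E window [start+10s, start+60s] keeps rows i = 10..60 of each of the 10 tables: 51 * 10 = 510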
+ tdSql.query("select count(*) from dumptmp2.st")
+ tdSql.checkData(0,0,510)
+
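+ # -S start+10s alone keeps rows i = 10..99 per table: 90 * 10 = 900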
+ tdSql.query("select count(*) from dumptmp3.st")
+ tdSql.checkData(0,0,900)
+
+ # check data
+ origin_res = tdSql.getResult("select * from timedb1.st")
+ dump_res = tdSql.getResult("select * from dumptmp1.st")
+ if origin_res == dump_res:
+ tdLog.info("test nano second : dump check data pass for all data!" )
+ else:
+ tdLog.info("test nano second : dump check data failed for all data!" )
+
+ origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000000 and ts <= 1625068860000000000")
+ dump_res = tdSql.getResult("select * from dumptmp2.st")
+ if origin_res == dump_res:
+ tdLog.info(" test nano second : dump check data pass for data! " )
+ else:
+ tdLog.info(" test nano second : dump check data failed for data !" )
+
+ origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000000 ")
+ dump_res = tdSql.getResult("select * from dumptmp3.st")
+ if origin_res == dump_res:
+ tdLog.info(" test nano second : dump check data pass for data! " )
+ else:
+ tdLog.info(" test nano second : dump check data failed for data !" )
+
+
+ # us (microsecond) precision support test case
+
+ os.system("rm -rf ./taosdumptest/")
+ tdSql.execute("drop database if exists dumptmp1")
+ tdSql.execute("drop database if exists dumptmp2")
+ tdSql.execute("drop database if exists dumptmp3")
+
+ if not os.path.exists("./taosdumptest/tmp1"):
+ os.makedirs("./taosdumptest/dumptmp1")
+ else:
+ print("path exits!")
+
+ if not os.path.exists("./taosdumptest/dumptmp2"):
+ os.makedirs("./taosdumptest/dumptmp2")
+
+ if not os.path.exists("./taosdumptest/dumptmp3"):
+ os.makedirs("./taosdumptest/dumptmp3")
+
+ buildPath = self.getBuildPath()
+ if (buildPath == ""):
+ tdLog.exit("taosdump not found!")
+ else:
+ tdLog.info("taosdump found in %s" % buildPath)
+ binPath = buildPath + "/build/bin/"
+
+ self.createdb(precision="us")
+
+ os.system(
+ "%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % binPath)
+
+ os.system(
+ '%staosdump --databases timedb1 -S 1625068810000000 -E 1625068860000000 -C us -o ./taosdumptest/dumptmp2 ' %
+ binPath)
+ os.system(
+ '%staosdump --databases timedb1 -S 1625068810000000 -o ./taosdumptest/dumptmp3 ' %
+ binPath)
+
+ os.system(
+ "sed -i \"s/timedb1/dumptmp1/g\" `grep timedb1 -rl ./taosdumptest/dumptmp1`")
+ os.system(
+ "sed -i \"s/timedb1/dumptmp2/g\" `grep timedb1 -rl ./taosdumptest/dumptmp2`")
+ os.system(
+ "sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`")
+
+ os.system( "%staosdump -i ./taosdumptest/dumptmp1" %binPath)
+ os.system( "%staosdump -i ./taosdumptest/dumptmp2" %binPath)
+ os.system( "%staosdump -i ./taosdumptest/dumptmp3" %binPath)
+
+
+ tdSql.query("select count(*) from dumptmp1.st")
+ tdSql.checkData(0,0,1000)
+
+ tdSql.query("select count(*) from dumptmp2.st")
+ tdSql.checkData(0,0,510)
+
+ tdSql.query("select count(*) from dumptmp3.st")
+ tdSql.checkData(0,0,900)
+
+
+ origin_res = tdSql.getResult("select * from timedb1.st")
+ dump_res = tdSql.getResult("select * from dumptmp1.st")
+ if origin_res == dump_res:
+ tdLog.info("test us second : dump check data pass for all data!" )
+ else:
+ tdLog.info("test us second : dump check data failed for all data!" )
+
+ origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000 and ts <= 1625068860000000")
+ dump_res = tdSql.getResult("select * from dumptmp2.st")
+ if origin_res == dump_res:
+ tdLog.info(" test us second : dump check data pass for data! " )
+ else:
+ tdLog.info(" test us second : dump check data failed for data!" )
+
+ origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000 ")
+ dump_res = tdSql.getResult("select * from dumptmp3.st")
+ if origin_res == dump_res:
+ tdLog.info(" test us second : dump check data pass for data! " )
+ else:
+ tdLog.info(" test us second : dump check data failed for data! " )
+
+
+ # ms (millisecond) precision support test case
+
+ os.system("rm -rf ./taosdumptest/")
+ tdSql.execute("drop database if exists dumptmp1")
+ tdSql.execute("drop database if exists dumptmp2")
+ tdSql.execute("drop database if exists dumptmp3")
+
+ if not os.path.exists("./taosdumptest/dumptmp1"):
+ os.makedirs("./taosdumptest/dumptmp1")
+ else:
+ print("path already exists!")
+
+ if not os.path.exists("./taosdumptest/dumptmp2"):
+ os.makedirs("./taosdumptest/dumptmp2")
+
+ if not os.path.exists("./taosdumptest/dumptmp3"):
+ os.makedirs("./taosdumptest/dumptmp3")
+
+ buildPath = self.getBuildPath()
+ if (buildPath == ""):
+ tdLog.exit("taosdump not found!")
+ else:
+ tdLog.info("taosdump found in %s" % buildPath)
+ binPath = buildPath + "/build/bin/"
+
+ self.createdb(precision="ms")
+
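+ # repeat the three exports at millisecond precision: full dump, closed range (-S/-E with -C ms), open-ended range (-S only)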
+ os.system(
+ "%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % binPath)
+
+ os.system(
+ '%staosdump --databases timedb1 -S 1625068810000 -E 1625068860000 -C ms -o ./taosdumptest/dumptmp2 ' %
+ binPath)
+ os.system(
+ '%staosdump --databases timedb1 -S 1625068810000 -o ./taosdumptest/dumptmp3 ' %
+ binPath)
+
+ os.system(
+ "sed -i \"s/timedb1/dumptmp1/g\" `grep timedb1 -rl ./taosdumptest/dumptmp1`")
+ os.system(
+ "sed -i \"s/timedb1/dumptmp2/g\" `grep timedb1 -rl ./taosdumptest/dumptmp2`")
+ os.system(
+ "sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`")
+
+ os.system( "%staosdump -i ./taosdumptest/dumptmp1" %binPath)
+ os.system( "%staosdump -i ./taosdumptest/dumptmp2" %binPath)
+ os.system( "%staosdump -i ./taosdumptest/dumptmp3" %binPath)
+
+
+ tdSql.query("select count(*) from dumptmp1.st")
+ tdSql.checkData(0,0,1000)
+
+ tdSql.query("select count(*) from dumptmp2.st")
+ tdSql.checkData(0,0,510)
+
+ tdSql.query("select count(*) from dumptmp3.st")
+ tdSql.checkData(0,0,900)
+
+
+ origin_res = tdSql.getResult("select * from timedb1.st")
+ dump_res = tdSql.getResult("select * from dumptmp1.st")
+ if origin_res == dump_res:
+ tdLog.info("millisecond precision: full dump (dumptmp1) data check passed")
+ else:
+ tdLog.info("millisecond precision: full dump (dumptmp1) data check failed")
+
+ origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000 and ts <= 1625068860000")
+ dump_res = tdSql.getResult("select * from dumptmp2.st")
+ if origin_res == dump_res:
+ tdLog.info("millisecond precision: time-range dump (dumptmp2) data check passed")
+ else:
+ tdLog.info("millisecond precision: time-range dump (dumptmp2) data check failed")
+
+ origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000 ")
+ dump_res = tdSql.getResult("select * from dumptmp3.st")
+ if origin_res == dump_res:
+ tdLog.info("millisecond precision: open-ended dump (dumptmp3) data check passed")
+ else:
+ tdLog.info("millisecond precision: open-ended dump (dumptmp3) data check failed")
+
+
+ os.system("rm -rf ./taosdumptest/")
+ os.system("rm -rf ./dump_result.txt")
+ os.system("rm -rf *.py.sql")
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/update/merge_commit_data2.py b/tests/pytest/update/merge_commit_data2.py
index 3f0fc718ad83244353bf88da905e6ac0ff800cb5..a334f39e867e80ca05a8de57016e4581ee5fb68c 100644
--- a/tests/pytest/update/merge_commit_data2.py
+++ b/tests/pytest/update/merge_commit_data2.py
@@ -28,6 +28,7 @@ class TDTestCase:
def restart_taosd(self,db):
tdDnodes.stop(1)
tdDnodes.startWithoutSleep(1)
+ tdLog.sleep(2)
tdSql.execute("use %s;" % db)
def date_to_timestamp_microseconds(self, date):
diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py
index 795af8a1f8db387228e52c99de9a5ac36ff4cad2..b42af27d063aa9c8b08ac58374878a7153912142 100644
--- a/tests/pytest/util/sql.py
+++ b/tests/pytest/util/sql.py
@@ -203,6 +203,19 @@ class TDSql:
self.checkRowCol(row, col)
return self.queryResult[row][col]
+ def getResult(self, sql):
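+ """Run the given SQL, cache and return the full result set; log the failing statement and re-raise on error."""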
+ self.sql = sql
+ try:
+ self.cursor.execute(sql)
+ self.queryResult = self.cursor.fetchall()
+ except Exception as e:
+ caller = inspect.getframeinfo(inspect.stack()[1][0])
+ args = (caller.filename, caller.lineno, sql, repr(e))
+ tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
+ raise Exception(repr(e))
+ return self.queryResult
+
+
def executeTimes(self, sql, times):
for i in range(times):
try:
@@ -304,4 +317,4 @@ class TDSql:
tdLog.info("dir: %s is created" %dir)
pass
-tdSql = TDSql()
+tdSql = TDSql()
\ No newline at end of file
diff --git a/tests/pytest/wal/sdbCompClusterReplica2.py b/tests/pytest/wal/sdbCompClusterReplica2.py
index e364145e190143f2807612350757b64519019daa..ba80e3864aed27c091dd5ec72ca9f09ea2c36126 100644
--- a/tests/pytest/wal/sdbCompClusterReplica2.py
+++ b/tests/pytest/wal/sdbCompClusterReplica2.py
@@ -86,7 +86,7 @@ class TwoClients:
tdSql.execute("alter table stb2_0 add column col2 binary(4)")
tdSql.execute("alter table stb2_0 drop column col1")
tdSql.execute("insert into stb2_0 values(1614218422000,8638,'R')")
- tdSql.execute("drop dnode 10")
+ tdSql.execute("drop dnode chenhaoran02")
sleep(10)
os.system("rm -rf /var/lib/taos/*")
print("clear dnode chenhaoran02'data files")
@@ -97,9 +97,6 @@ class TwoClients:
tdSql.execute("create dnode chenhaoran02 ;")
-
-
-
# stop taosd and compact wal file
os.system("ps -ef |grep taosd |grep -v 'grep' |awk '{print $2}'|xargs kill -2")
sleep(10)
@@ -145,4 +142,4 @@ class TwoClients:
clients = TwoClients()
clients.initConnection()
# clients.getBuildPath()
-clients.run()
\ No newline at end of file
+clients.run()
diff --git a/tests/robust/cluster.sh b/tests/robust/cluster.sh
old mode 100644
new mode 100755
index 166014fefb2a8d67acb9d69ecd897b902195caf0..b291fb4d9ea229c4620004a0305b06a08994a99e
--- a/tests/robust/cluster.sh
+++ b/tests/robust/cluster.sh
@@ -23,7 +23,9 @@ menu(){
echo "=============================="
echo "3 arbitrator"
echo "=============================="
- echo "4 exit"
+ echo "4 alter replica"
+ echo "=============================="
+ echo "5 exit"
echo "=============================="
}
@@ -310,6 +312,7 @@ do
2)
var=`ps -ef | grep tarbitrator | awk '{print $2}' | head -n 1`
kill -9 $var
+ echo -e "\033[32mSuccessfully stopped arbitrator $3 \033[0m"
break
;;
3)
@@ -318,6 +321,13 @@ do
esac
;;
4)
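+ # option 4: prompt for a database name and replica count, then alter the replica number through the taos shell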
+ read -p "Enter replica number: " rep
+ read -p "Enter database name: " db
+ taos -s "alter database $db replica $rep"
+ echo -e "\033[32mSuccessfully changed $db's replica to $rep \033[0m"
+ break
+ ;;
+ 5)
break
;;
esac
diff --git a/tests/robust/monitor.sh b/tests/robust/monitor.sh
new file mode 100755
index 0000000000000000000000000000000000000000..4fc1fed75275f87eb184ed34234fdb04a7a5e312
--- /dev/null
+++ b/tests/robust/monitor.sh
@@ -0,0 +1,8 @@
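+# Poll "show dnodes" and "show mnodes" once per second and append the output to dnode.log and mnode.log.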
+while :
+do
+ dlog=`taos -s "show dnodes"`
+ mlog=`taos -s "show mnodes"`
+ echo "$dlog" | tee -a dnode.log
+ echo "$mlog" | tee -a mnode.log
+ sleep 1s
+done
\ No newline at end of file
diff --git a/tests/script/api/makefile b/tests/script/api/makefile
index 5eeb1342887afc2c4f920aa673466eef7f7ae510..7595594cbf2572623dd18648c8c4fa8e65dd966a 100644
--- a/tests/script/api/makefile
+++ b/tests/script/api/makefile
@@ -13,7 +13,9 @@ all: $(TARGET)
exe:
gcc $(CFLAGS) ./batchprepare.c -o $(ROOT)batchprepare $(LFLAGS)
gcc $(CFLAGS) ./stmtBatchTest.c -o $(ROOT)stmtBatchTest $(LFLAGS)
+ gcc $(CFLAGS) ./stmtTest.c -o $(ROOT)stmtTest $(LFLAGS)
clean:
rm $(ROOT)batchprepare
rm $(ROOT)stmtBatchTest
+ rm $(ROOT)stmtTest
diff --git a/tests/script/api/stmtTest.c b/tests/script/api/stmtTest.c
new file mode 100644
index 0000000000000000000000000000000000000000..9595fe5b2d72e3291959828badf45abc2f7cb71e
--- /dev/null
+++ b/tests/script/api/stmtTest.c
@@ -0,0 +1,238 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "taos.h"
+#include <stdint.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#define PRINT_ERROR printf("\033[31m");
+#define PRINT_SUCCESS printf("\033[32m");
+
+void execute_simple_sql(void *taos, char *sql) {
+ TAOS_RES *result = taos_query(taos, sql);
+ if ( result == NULL || taos_errno(result) != 0) {
+ PRINT_ERROR
+ printf("failed to %s, Reason: %s\n", sql, taos_errstr(result));
+ taos_free_result(result);
+ exit(EXIT_FAILURE);
+ }
+ taos_free_result(result);
+ PRINT_SUCCESS
+ printf("Successfully %s\n", sql);
+}
+
+void check_result(TAOS *taos, int id, int expected) {
+ char sql[256] = {0};
+ sprintf(sql, "select * from t%d", id);
+ TAOS_RES *result;
+ result = taos_query(taos, sql);
+ if ( result == NULL || taos_errno(result) != 0) {
+ PRINT_ERROR
+ printf("failed to %s, Reason: %s\n", sql, taos_errstr(result));
+ exit(EXIT_FAILURE);
+ }
+ PRINT_SUCCESS
+ printf("Successfully execute %s\n", sql);
+ int rows = 0;
+ TAOS_ROW row;
+ while ((row = taos_fetch_row(result))) {
+ rows++;
+ }
+ if (rows == expected) {
+ PRINT_SUCCESS
+ printf("table t%d's %d rows are fetched as expected\n", id, rows);
+ } else {
+ PRINT_ERROR
+ printf("table t%d's %d rows are fetched but %d expected\n", id, rows, expected);
+ }
+ taos_free_result(result);
+}
+
+int main(int argc, char *argv[]) {
+ void *taos = taos_connect("127.0.0.1", "root", "taosdata", NULL, 0);
+ if (taos == NULL) {
+ PRINT_ERROR
+ printf("TDengine error: failed to connect\n");
+ exit(EXIT_FAILURE);
+ }
+ PRINT_SUCCESS
+ printf("Successfully connected to TDengine\n");
+
+ execute_simple_sql(taos, "drop database if exists test");
+ execute_simple_sql(taos, "create database test");
+ execute_simple_sql(taos, "use test");
+ execute_simple_sql(taos, "create table super(ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 binary(8), c6 smallint, c7 tinyint, c8 bool, c9 nchar(8), c10 timestamp) tags (t1 int, t2 bigint, t3 float, t4 double, t5 binary(8), t6 smallint, t7 tinyint, t8 bool, t9 nchar(8))");
+
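+ // build a single "create table ... using super tags(...)" statement that creates sub-tables t0..t9 in one call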
+ char *sql = calloc(1, 1024*1024);
+ int sqlLen = 0;
+ sqlLen = sprintf(sql, "create table");
+ for (int i = 0; i < 10; i++) {
+ sqlLen += sprintf(sql + sqlLen, " t%d using super tags (%d, 2147483648, 0.1, 0.000000001, 'abcdefgh', 32767, 127, 1, '一二三四五六七八')", i, i);
+ }
+ execute_simple_sql(taos, sql);
+
+
+ int code = taos_load_table_info(taos, "t0,t1,t2,t3,t4,t5,t6,t7,t8,t9");
+ if (code != 0) {
+ PRINT_ERROR
+ printf("failed to load table info: 0x%08x\n", code);
+ exit(EXIT_FAILURE);
+ }
+ PRINT_SUCCESS
+ printf("Successfully load table info\n");
+
+ TAOS_STMT *stmt = taos_stmt_init(taos);
+ if (stmt == NULL) {
+ PRINT_ERROR
+ printf("TDengine error: failed to init taos_stmt\n");
+ exit(EXIT_FAILURE);
+ }
+ PRINT_SUCCESS
+ printf("Successfully init taos_stmt\n");
+
+ uintptr_t c10len = 0;
+ struct {
+ int64_t c1;
+ int32_t c2;
+ int64_t c3;
+ float c4;
+ double c5;
+ unsigned char c6[8];
+ int16_t c7;
+ int8_t c8;
+ int8_t c9;
+ char c10[32];
+ } v = {0};
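+ // one TAOS_BIND per column of the prepared insert (ts plus c1..c10), each pointing at the matching field of v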
+ TAOS_BIND params[11];
+ params[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
+ params[0].buffer_length = sizeof(v.c1);
+ params[0].buffer = &v.c1;
+ params[0].length = &params[0].buffer_length;
+ params[0].is_null = NULL;
+
+ params[1].buffer_type = TSDB_DATA_TYPE_INT;
+ params[1].buffer_length = sizeof(v.c2);
+ params[1].buffer = &v.c2;
+ params[1].length = &params[1].buffer_length;
+ params[1].is_null = NULL;
+
+ params[2].buffer_type = TSDB_DATA_TYPE_BIGINT;
+ params[2].buffer_length = sizeof(v.c3);
+ params[2].buffer = &v.c3;
+ params[2].length = &params[2].buffer_length;
+ params[2].is_null = NULL;
+
+ params[3].buffer_type = TSDB_DATA_TYPE_FLOAT;
+ params[3].buffer_length = sizeof(v.c4);
+ params[3].buffer = &v.c4;
+ params[3].length = &params[3].buffer_length;
+ params[3].is_null = NULL;
+
+ params[4].buffer_type = TSDB_DATA_TYPE_DOUBLE;
+ params[4].buffer_length = sizeof(v.c5);
+ params[4].buffer = &v.c5;
+ params[4].length = &params[4].buffer_length;
+ params[4].is_null = NULL;
+
+ params[5].buffer_type = TSDB_DATA_TYPE_BINARY;
+ params[5].buffer_length = sizeof(v.c6);
+ params[5].buffer = &v.c6;
+ params[5].length = &params[5].buffer_length;
+ params[5].is_null = NULL;
+
+ params[6].buffer_type = TSDB_DATA_TYPE_SMALLINT;
+ params[6].buffer_length = sizeof(v.c7);
+ params[6].buffer = &v.c7;
+ params[6].length = &params[6].buffer_length;
+ params[6].is_null = NULL;
+
+ params[7].buffer_type = TSDB_DATA_TYPE_TINYINT;
+ params[7].buffer_length = sizeof(v.c8);
+ params[7].buffer = &v.c8;
+ params[7].length = &params[7].buffer_length;
+ params[7].is_null = NULL;
+
+ params[8].buffer_type = TSDB_DATA_TYPE_BOOL;
+ params[8].buffer_length = sizeof(v.c9);
+ params[8].buffer = &v.c9;
+ params[8].length = &params[8].buffer_length;
+ params[8].is_null = NULL;
+
+ params[9].buffer_type = TSDB_DATA_TYPE_NCHAR;
+ params[9].buffer_length = sizeof(v.c10);
+ params[9].buffer = &v.c10;
+ params[9].length = &c10len;
+ params[9].is_null = NULL;
+
+ params[10].buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
+ params[10].buffer_length = sizeof(v.c1);
+ params[10].buffer = &v.c1;
+ params[10].length = &params[10].buffer_length;
+ params[10].is_null = NULL;
+
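+ // prepare the insert template once: the first "?" is the table name placeholder, the rest are the column values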
+ char *stmt_sql = "insert into ? values (?,?,?,?,?,?,?,?,?,?,?)";
+ code = taos_stmt_prepare(stmt, stmt_sql, 0);
+ if (code != 0){
+ PRINT_ERROR
+ printf("failed to execute taos_stmt_prepare. code:0x%x\n", code);
+ exit(EXIT_FAILURE);
+ }
+ PRINT_SUCCESS
+ printf("Successfully execute taos_stmt_prepare\n");
+
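+ // bind one identical row per sub-table: t0 via taos_stmt_set_tbname, t1..t9 via taos_stmt_set_sub_tbname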
+ for (int i = 0; i < 10; i++) {
+ char buf[32];
+ sprintf(buf, "t%d", i);
+ if (i == 0) {
+ code = taos_stmt_set_tbname(stmt, buf);
+ if (code != 0) {
+ PRINT_ERROR
+ printf("failed to execute taos_stmt_set_tbname. code:0x%x\n", code);
+ exit(EXIT_FAILURE);
+ }
+ PRINT_SUCCESS
+ printf("Successfully execute taos_stmt_set_tbname\n");
+ } else {
+ code = taos_stmt_set_sub_tbname(stmt, buf);
+ if (code != 0) {
+ PRINT_ERROR
+ printf("failed to execute taos_stmt_set_sub_tbname. code:0x%x\n", code);
+ exit(EXIT_FAILURE);
+ }
+ PRINT_SUCCESS
+ printf("Successfully execute taos_stmt_set_sub_tbname\n");
+ }
+
+ v.c1 = (int64_t)1591060628000;
+ v.c2 = (int32_t)2147483647;
+ v.c3 = (int64_t)2147483648;
+ v.c4 = (float)0.1;
+ v.c5 = (double)0.000000001;
+ for (int j = 0; j < sizeof(v.c6); j++) {
+ v.c6[j] = (char)('a');
+ }
+ v.c7 = 32767;
+ v.c8 = 127;
+ v.c9 = 1;
+ strcpy(v.c10, "一二三四五六七八");
+ c10len=strlen(v.c10);
+ taos_stmt_bind_param(stmt, params);
+ taos_stmt_add_batch(stmt);
+ }
+
+ if (taos_stmt_execute(stmt) != 0) {
+ PRINT_ERROR
+ printf("failed to execute insert statement.\n");
+ exit(EXIT_FAILURE);
+ }
+ PRINT_SUCCESS
+ printf("Successfully execute insert statement.\n");
+
+ taos_stmt_close(stmt);
+ for (int i = 0; i < 10; i++) {
+ check_result(taos, i, 1);
+ }
+
+ return 0;
+}
diff --git a/tests/script/general/parser/function.sim b/tests/script/general/parser/function.sim
index a3470b1763e30b8c6c6baa7b897de40910a84743..5edadad3a6686ad8fafee6d24e741bb63622c20e 100644
--- a/tests/script/general/parser/function.sim
+++ b/tests/script/general/parser/function.sim
@@ -783,7 +783,7 @@ endi
sql create stable st1 (ts timestamp, f1 int, f2 int) tags (id int);
sql create table tb1 using st1 tags(1);
-sql insert into tb1 values (now, 1, 1);
+sql insert into tb1 values ('2021-07-02 00:00:00', 1, 1);
sql select stddev(f1) from st1 group by f1;
diff --git a/tests/script/general/parser/join.sim b/tests/script/general/parser/join.sim
index 2c14a86c3a6532088d0dce584199c0d5ffbed6fd..e2132589bd3a54d42e683094f184b3a4a4932f71 100644
--- a/tests/script/general/parser/join.sim
+++ b/tests/script/general/parser/join.sim
@@ -444,6 +444,10 @@ if $rows != $val then
return -1
endi
+print ================>TD-5600
+sql select first(join_tb0.c8),first(join_tb0.c9) from join_tb1 , join_tb0 where join_tb1.ts = join_tb0.ts and join_tb1.ts <= 100002 and join_tb1.ts>=100000 interval(1s) fill(linear);
+
+
#===============================================================
sql select first(join_tb0.c8),first(join_tb0.c9) from join_tb1 , join_tb0 where join_tb1.ts = join_tb0.ts and join_tb1.ts <= 100002 and join_tb0.c7 = true
diff --git a/tests/script/general/stream/metrics_del.sim b/tests/script/general/stream/metrics_del.sim
index 321658cd8da686642e35936e0d622079aa9d7f1f..6cc3da71e9a5c34f83a74be5e4fe0345e94beadf 100644
--- a/tests/script/general/stream/metrics_del.sim
+++ b/tests/script/general/stream/metrics_del.sim
@@ -34,11 +34,11 @@ while $i < $tbNum
$tb = $tbPrefix . $i
sql create table $tb using $mt tags( $i )
- $x = -1440
+ $x = 0
$y = 0
while $y < $rowNum
- $ms = $x . m
- sql insert into $tb values (now $ms , $y , $y )
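+ # use a fixed epoch-millisecond base timestamp instead of now so the inserted rows are deterministic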
+ $ts = 1626710400000 + $x
+ sql insert into $tb values ($ts , $y , $y )
$x = $x + 1
$y = $y + 1
endw
diff --git a/tests/script/sh/stop_dnodes.sh b/tests/script/sh/stop_dnodes.sh
index 430f39901e62415e780999171139fcd961cdd54c..4c6d8e03510a39c2d5d1d020b5cfe7dabee39cb0 100755
--- a/tests/script/sh/stop_dnodes.sh
+++ b/tests/script/sh/stop_dnodes.sh
@@ -14,7 +14,7 @@ while [ -n "$PID" ]; do
echo kill -9 $PID
pkill -9 taosd
echo "Killing processes locking on port 6030"
- if [[ "$OS_TYPE" != "Darwin" ]]; then
+ if [ "$OS_TYPE" != "Darwin" ]; then
fuser -k -n tcp 6030
else
lsof -nti:6030 | xargs kill -9
@@ -26,7 +26,7 @@ PID=`ps -ef|grep -w tarbitrator | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]; do
echo kill -9 $PID
pkill -9 tarbitrator
- if [[ "$OS_TYPE" != "Darwin" ]]; then
+ if [ "$OS_TYPE" != "Darwin" ]; then
fuser -k -n tcp 6040
else
lsof -nti:6040 | xargs kill -9
diff --git a/tests/tsim/src/simExe.c b/tests/tsim/src/simExe.c
index a05f46ce0de54628f289c937e959ccc3337e00a9..397accfea57fec92dbe2b7f9b5c4b730a91e9cbd 100644
--- a/tests/tsim/src/simExe.c
+++ b/tests/tsim/src/simExe.c
@@ -808,6 +808,11 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) {
break;
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
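+ // reject corrupted lengths before copying variable-length data into the fixed-size value buffer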
+ if (length[i] < 0 || length[i] > 1 << 20) {
+ fprintf(stderr, "Invalid length(%d) of BINARY or NCHAR\n", length[i]);
+ exit(-1);
+ }
+
memset(value, 0, MAX_QUERY_VALUE_LEN);
memcpy(value, row[i], length[i]);
value[length[i]] = 0;