Commit 89a15326 authored by H Haojun Liao

Merge branch 'develop' into feature/query

......@@ -111,7 +111,7 @@ TDengine uses a Master-Slave model for synchronization; compared with the popular RAFT consensus
2. The application wraps the versioned write request in a WAL Head and writes it to the WAL (Write Ahead Log).
3. The application calls the API syncForwardToPeer. If vnode B is in slave state, the sync module sends the packet containing the WAL Head to vnode B in a Forward message; otherwise nothing is forwarded.
4. After vnode B receives the Forward message, it invokes the callback function writeToCache and hands the data to the application for processing.
5. After a successful write, the vnode B application must call syncAckForward to notify the sync module that the write succeeded.
5. After a successful write, the vnode B application must call syncConfirmForward to notify the sync module that the write succeeded.
6. If quorum is greater than 1, vnode B waits for the application's acknowledgement; after receiving it, vnode B sends a Forward Response message to node A.
7. If quorum is greater than 1, vnode A waits for the acknowledgement of the Forward message from vnode B or other replicas.
8. If quorum is greater than 1, once vnode A has received quorum-1 acknowledgements, it calls the callback function confirmForward to notify the application that the write succeeded (see the sketch after this list).
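
The quorum accounting in steps 6-8 can be summarized in a short, self-contained sketch; the names below are hypothetical and are not the actual sync module API. The master simply counts Forward Response messages and reports success to the application once quorum-1 replicas have confirmed.

```c
/* Hypothetical sketch of the quorum accounting in steps 6-8; not the actual
 * sync module API. The master counts Forward Response acks and reports
 * success once quorum-1 replicas have confirmed. */
#include <stdio.h>

typedef struct {
  int quorum;  /* replication quorum configured for the vnode */
  int acks;    /* Forward Response messages received so far   */
} SForwardState;

/* called on the master (vnode A) when a Forward Response arrives */
static int onForwardResponse(SForwardState *st) {
  if (st->quorum <= 1) return 1;               /* quorum 1: no ack required      */
  if (++st->acks >= st->quorum - 1) return 1;  /* enough replicas have confirmed */
  return 0;                                    /* keep waiting                   */
}

int main(void) {
  SForwardState st = {.quorum = 3, .acks = 0};
  for (int i = 0; i < st.quorum - 1; ++i) {
    if (onForwardResponse(&st)) {
      printf("confirmForward: write confirmed after %d ack(s)\n", st.acks);
    }
  }
  return 0;
}
```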
......
......@@ -323,8 +323,6 @@ A vnode maintains a data version number (Version); when persisting the in-memory data
With synchronous replication, system performance drops somewhat and latency increases. Because metadata must be strongly consistent, data synchronization between mnodes uses synchronous replication by default.
Note: synchronous replication between vnodes is only supported in the Enterprise Edition.
## <a class="anchor" id="persistence"></a>Caching and Persistence
### Caching
......
......@@ -48,7 +48,7 @@ TDengine uses millisecond-precision timestamps by default, but by passing, at CREATE DATABASE time,
| 3 | BIGINT | 8 | Long integer, range [-2^63+1, 2^63-1]; -2^63 is used for NULL |
| 4 | FLOAT | 4 | Floating point, 6-7 significant digits, range [-3.4E38, 3.4E38] |
| 5 | DOUBLE | 8 | Double-precision floating point, 15-16 significant digits, range [-1.7E308, 1.7E308] |
| 6 | BINARY | User-defined | Single-byte string, recommended only for visible ASCII characters; multi-byte characters such as Chinese must use nchar. In theory the maximum length is 16374 bytes, but since each row holds at most 16K bytes the practical limit is usually lower. binary only accepts string input, and the string must be enclosed in single quotes. A size must be specified: binary(20) defines a string of at most 20 single-byte characters, each occupying 1 byte of storage; if the user string exceeds 20 bytes an error is reported. A single quote inside the string is written with the escape backslash plus single quote, i.e. `\'`. |
| 6 | BINARY | User-defined | Single-byte string, recommended only for visible ASCII characters; multi-byte characters such as Chinese must use nchar. In theory the maximum length is 16374 bytes, but since each row holds at most 16K bytes the practical limit is usually lower. binary only accepts string input, and the string must be enclosed in single quotes. A size must be specified: binary(20) defines a string of at most 20 single-byte characters, each occupying 1 byte of storage, for a fixed total of 20 bytes; if the user string exceeds 20 bytes an error is reported. A single quote inside the string is written with the escape backslash plus single quote, i.e. `\'` (see the C snippet after this table excerpt). |
| 7 | SMALLINT | 2 | Short integer, range [-32767, 32767]; -32768 is used for NULL |
| 8 | TINYINT | 1 | Single-byte integer, range [-127, 127]; -128 is used for NULL |
| 9 | BOOL | 1 | Boolean, {true, false} |
......
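
The BINARY row above describes both the size limit and single-quote escaping; a hypothetical C-client snippet illustrating the two points (the table name and column layout are assumptions for illustration only):

```c
/* Hypothetical example using the TDengine C client: a binary(20) column
 * rejects strings longer than 20 bytes, and an embedded single quote is
 * escaped as \' inside the SQL literal. The table "demo" and its layout
 * are assumptions for illustration only. */
#include <taos.h>

static int insertBinaryExample(TAOS *taos) {
  /* assumes: CREATE TABLE demo (ts TIMESTAMP, note BINARY(20)); */
  TAOS_RES *res = taos_query(taos, "INSERT INTO demo VALUES (NOW, 'it\\'s ok')");
  int code = taos_errno(res);   /* non-zero if the value exceeds 20 bytes */
  taos_free_result(res);
  return code;
}
```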
......@@ -322,8 +322,6 @@ For scenarios with higher data consistency requirements, asynchronous data repli
With synchronous replication, system performance decreases and latency increases. Because metadata must be strongly consistent, data synchronization between mnodes defaults to synchronous replication.
Note: synchronous replication between vnodes is only supported in Enterprise Edition
## <a class="anchor" id="persistence"></a> Caching and Persistence
### Caching
......
......@@ -1864,8 +1864,8 @@ static SMemRow tdGenMemRowFromBuilder(SMemRowBuilder* pBuilder) {
while (i < nColsBound) {
int16_t colId = payloadColId(p);
uint8_t colType = payloadColType(p);
tdAppendKvColVal(kvRow, POINTER_SHIFT(pVals,payloadColOffset(p)), colId, colType, toffset);
toffset += sizeof(SColIdx);
tdAppendKvColVal(kvRow, POINTER_SHIFT(pVals,payloadColOffset(p)), colId, colType, &toffset);
//toffset += sizeof(SColIdx);
p = payloadNextCol(p);
++i;
}
......
......@@ -55,7 +55,7 @@ extern "C" {
typedef struct {
int8_t type; // Column type
int16_t colId; // column ID
uint16_t bytes; // column bytes
int16_t bytes; // column bytes (restore to int16_t in case of misuse)
uint16_t offset; // point offset in SDataRow after the header part.
} STColumn;
......@@ -232,6 +232,83 @@ static FORCE_INLINE void *tdGetRowDataOfCol(SDataRow row, int8_t type, int32_t o
}
}
static FORCE_INLINE void *tdGetPtrToCol(SDataRow row, STSchema *pSchema, int idx) {
return POINTER_SHIFT(row, TD_DATA_ROW_HEAD_SIZE + pSchema->columns[idx].offset);
}
static FORCE_INLINE void *tdGetColOfRowBySchema(SDataRow row, STSchema *pSchema, int idx) {
int16_t offset = TD_DATA_ROW_HEAD_SIZE + pSchema->columns[idx].offset;
int8_t type = pSchema->columns[idx].type;
return tdGetRowDataOfCol(row, type, offset);
}
static FORCE_INLINE bool tdIsColOfRowNullBySchema(SDataRow row, STSchema *pSchema, int idx) {
int16_t offset = TD_DATA_ROW_HEAD_SIZE + pSchema->columns[idx].offset;
int8_t type = pSchema->columns[idx].type;
return isNull(tdGetRowDataOfCol(row, type, offset), type);
}
static FORCE_INLINE void tdSetColOfRowNullBySchema(SDataRow row, STSchema *pSchema, int idx) {
int16_t offset = TD_DATA_ROW_HEAD_SIZE + pSchema->columns[idx].offset;
int8_t type = pSchema->columns[idx].type;
int16_t bytes = pSchema->columns[idx].bytes;
setNull(tdGetRowDataOfCol(row, type, offset), type, bytes);
}
static FORCE_INLINE void tdCopyColOfRowBySchema(SDataRow dst, STSchema *pDstSchema, int dstIdx, SDataRow src, STSchema *pSrcSchema, int srcIdx) {
int8_t type = pDstSchema->columns[dstIdx].type;
ASSERT(type == pSrcSchema->columns[srcIdx].type);
void *pData = tdGetPtrToCol(dst, pDstSchema, dstIdx);
void *value = tdGetPtrToCol(src, pSrcSchema, srcIdx);
switch (type) {
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
*(VarDataOffsetT *)pData = *(VarDataOffsetT *)value;
pData = POINTER_SHIFT(dst, *(VarDataOffsetT *)pData);
value = POINTER_SHIFT(src, *(VarDataOffsetT *)value);
memcpy(pData, value, varDataTLen(value));
break;
case TSDB_DATA_TYPE_NULL:
case TSDB_DATA_TYPE_BOOL:
case TSDB_DATA_TYPE_TINYINT:
case TSDB_DATA_TYPE_UTINYINT:
*(uint8_t *)pData = *(uint8_t *)value;
break;
case TSDB_DATA_TYPE_SMALLINT:
case TSDB_DATA_TYPE_USMALLINT:
*(uint16_t *)pData = *(uint16_t *)value;
break;
case TSDB_DATA_TYPE_INT:
case TSDB_DATA_TYPE_UINT:
*(uint32_t *)pData = *(uint32_t *)value;
break;
case TSDB_DATA_TYPE_BIGINT:
case TSDB_DATA_TYPE_UBIGINT:
*(uint64_t *)pData = *(uint64_t *)value;
break;
case TSDB_DATA_TYPE_FLOAT:
SET_FLOAT_PTR(pData, value);
break;
case TSDB_DATA_TYPE_DOUBLE:
SET_DOUBLE_PTR(pData, value);
break;
case TSDB_DATA_TYPE_TIMESTAMP:
if (pSrcSchema->columns[srcIdx].colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
*(TSKEY *)pData = tdGetKey(*(TKEY *)value);
} else {
*(TSKEY *)pData = *(TSKEY *)value;
}
break;
default:
memcpy(pData, value, pSrcSchema->columns[srcIdx].bytes);
}
}
// ----------------- Data column structure
typedef struct SDataCol {
int8_t type; // column type
......@@ -335,7 +412,7 @@ void tdResetDataCols(SDataCols *pCols);
int tdInitDataCols(SDataCols *pCols, STSchema *pSchema);
SDataCols *tdDupDataCols(SDataCols *pCols, bool keepData);
SDataCols *tdFreeDataCols(SDataCols *pCols);
int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *pOffset);
int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *pOffset, bool forceSetNull);
// ----------------- K-V data row structure
/* |<-------------------------------------- len -------------------------------------------->|
......@@ -366,6 +443,7 @@ typedef struct {
#define kvRowColIdxAt(r, i) (kvRowColIdx(r) + (i))
#define kvRowFree(r) tfree(r)
#define kvRowEnd(r) POINTER_SHIFT(r, kvRowLen(r))
#define kvRowValLen(r) (kvRowLen(r) - TD_KV_ROW_HEAD_SIZE - sizeof(SColIdx) * kvRowNCols(r))
#define kvRowTKey(r) (*(TKEY *)(kvRowValues(r)))
#define kvRowKey(r) tdGetKey(kvRowTKey(r))
#define kvRowDeleted(r) TKEY_IS_DELETED(kvRowTKey(r))
......@@ -397,9 +475,9 @@ static FORCE_INLINE void *tdGetKVRowIdxOfCol(SKVRow row, int16_t colId) {
}
// the offset here does not include the kvRow header length
static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, int16_t colId, int8_t type, int32_t offset) {
static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, int16_t colId, int8_t type, int32_t *offset) {
ASSERT(value != NULL);
int32_t toffset = offset + TD_KV_ROW_HEAD_SIZE;
int32_t toffset = *offset + TD_KV_ROW_HEAD_SIZE;
SColIdx *pColIdx = (SColIdx *)POINTER_SHIFT(row, toffset);
char * ptr = (char *)POINTER_SHIFT(row, kvRowLen(row));
......@@ -410,7 +488,7 @@ static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, int16_t
memcpy(ptr, value, varDataTLen(value));
kvRowLen(row) += varDataTLen(value);
} else {
if (offset == 0) {
if (*offset == 0) {
ASSERT(type == TSDB_DATA_TYPE_TIMESTAMP);
TKEY tvalue = tdGetTKEY(*(TSKEY *)value);
memcpy(ptr, (void *)(&tvalue), TYPE_BYTES[type]);
......@@ -419,9 +497,27 @@ static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, int16_t
}
kvRowLen(row) += TYPE_BYTES[type];
}
*offset += sizeof(SColIdx);
return 0;
}
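
With the offset now passed by pointer, tdAppendKvColVal advances it by sizeof(SColIdx) itself; a hypothetical caller (compare with the tdGenMemRowFromBuilder hunk above) no longer maintains the offset manually:

```c
/* Hypothetical helper showing the new calling convention: the value-part
 * offset is passed by pointer and advanced inside tdAppendKvColVal, so the
 * caller (see the tdGenMemRowFromBuilder hunk above) no longer updates it. */
static FORCE_INLINE void exampleBuildKvRow(SKVRow row, void **vals, int16_t *colIds,
                                           int8_t *types, int nCols) {
  int32_t toffset = 0;  /* offset into the SColIdx array, excluding the header */
  for (int i = 0; i < nCols; ++i) {
    tdAppendKvColVal(row, vals[i], colIds[i], types[i], &toffset);
    /* no manual "toffset += sizeof(SColIdx);" needed any more */
  }
}
```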
// NOTE: offset here including the header size
static FORCE_INLINE void *tdGetKvRowDataOfCol(void *row, int32_t offset) { return POINTER_SHIFT(row, offset); }
static FORCE_INLINE void *tdGetKVRowValOfColEx(SKVRow row, int16_t colId, int32_t *nIdx) {
while (*nIdx < kvRowNCols(row)) {
SColIdx *pColIdx = kvRowColIdxAt(row, *nIdx);
if (pColIdx->colId == colId) {
++(*nIdx);
return tdGetKvRowDataOfCol(row, pColIdx->offset);
} else if (pColIdx->colId > colId) {
return NULL;
} else {
++(*nIdx);
}
}
return NULL;
}
// ----------------- K-V data row builder
typedef struct {
......@@ -494,7 +590,7 @@ typedef void *SMemRow;
#define TD_MEM_ROW_KV_VER_SIZE sizeof(int16_t)
#define TD_MEM_ROW_KV_TYPE_VER_SIZE (TD_MEM_ROW_TYPE_SIZE + TD_MEM_ROW_KV_VER_SIZE)
#define TD_MEM_ROW_DATA_HEAD_SIZE (TD_MEM_ROW_TYPE_SIZE + TD_DATA_ROW_HEAD_SIZE)
// #define TD_MEM_ROW_KV_HEAD_SIZE (TD_MEM_ROW_TYPE_SIZE + TD_MEM_ROW_KV_VER_SIZE + TD_KV_ROW_HEAD_SIZE)
#define TD_MEM_ROW_KV_HEAD_SIZE (TD_MEM_ROW_TYPE_SIZE + TD_MEM_ROW_KV_VER_SIZE + TD_KV_ROW_HEAD_SIZE)
#define SMEM_ROW_DATA 0U // SDataRow
#define SMEM_ROW_KV 1U // SKVRow
......@@ -537,27 +633,80 @@ typedef void *SMemRow;
#define memRowSetType(r, t) (memRowType(r) = (t))
#define memRowSetLen(r, l) (isDataRow(r) ? memRowDataLen(r) = (l) : memRowKvLen(r) = (l))
#define memRowSetVersion(r, v) (isDataRow(r) ? dataRowSetVersion(memRowDataBody(r), v) : memRowKvSetVersion(r, v))
#define memRowSetVersion(r, v) (isDataRow(r) ? dataRowSetVersion(memRowDataBody(r), v) : memRowSetKvVersion(r, v))
#define memRowCpy(dst, r) memcpy((dst), (r), memRowTLen(r))
#define memRowMaxBytesFromSchema(s) (schemaTLen(s) + TD_MEM_ROW_DATA_HEAD_SIZE)
#define memRowDeleted(r) TKEY_IS_DELETED(memRowTKey(r))
SMemRow tdMemRowDup(SMemRow row);
void tdAppendMemRowToDataCol(SMemRow row, STSchema *pSchema, SDataCols *pCols);
// NOTE: offset here including the header size
static FORCE_INLINE void *tdGetKvRowDataOfCol(void *row, int32_t offset) { return POINTER_SHIFT(row, offset); }
void tdAppendMemRowToDataCol(SMemRow row, STSchema *pSchema, SDataCols *pCols, bool forceSetNull);
// NOTE: offset here including the header size
static FORCE_INLINE void *tdGetMemRowDataOfCol(void *row, int8_t type, int32_t offset) {
static FORCE_INLINE void *tdGetMemRowDataOfCol(void *row, int16_t colId, int8_t colType, uint16_t offset) {
if (isDataRow(row)) {
return tdGetRowDataOfCol(row, type, offset);
} else if (isKvRow(row)) {
return tdGetKvRowDataOfCol(row, offset);
return tdGetRowDataOfCol(memRowDataBody(row), colType, offset);
} else {
ASSERT(0);
return tdGetKVRowValOfCol(memRowKvBody(row), colId);
}
return NULL;
}
/**
 * NOTE:
 * 1. Intended for scanning columns one by one in ascending colId order
 * 2. the offset here includes the header size
 */
static FORCE_INLINE void *tdGetMemRowDataOfColEx(void *row, int16_t colId, int8_t colType, int32_t offset,
int32_t *kvNIdx) {
if (isDataRow(row)) {
return tdGetRowDataOfCol(memRowDataBody(row), colType, offset);
} else {
return tdGetKVRowValOfColEx(memRowKvBody(row), colId, kvNIdx);
}
}
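
A hypothetical usage sketch of the Ex accessor: the schema is walked in ascending colId order while kvIdx is carried across calls, so an SKVRow is scanned once instead of being binary-searched per column.

```c
/* Hypothetical sketch: scan every column of a row in schema (ascending colId)
 * order, reusing kvIdx across calls so an SKVRow is walked only once. */
static FORCE_INLINE void exampleScanMemRow(SMemRow row, STSchema *pSchema) {
  int32_t kvIdx = 0;  /* persistent cursor into the SKVRow column index */
  for (int i = 0; i < schemaNCols(pSchema); ++i) {
    STColumn *pCol = schemaColAt(pSchema, i);
    void *val = tdGetMemRowDataOfColEx(row, pCol->colId, pCol->type,
                                       TD_DATA_ROW_HEAD_SIZE + pCol->offset, &kvIdx);
    (void)val;  /* NULL means the column is absent from a KV row */
  }
}
```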
static FORCE_INLINE int tdAppendMemColVal(SMemRow row, const void *value, int16_t colId, int8_t type, int32_t offset,
int32_t *kvOffset) {
if (isDataRow(row)) {
tdAppendColVal(memRowDataBody(row), value, type, offset);
} else {
tdAppendKvColVal(memRowKvBody(row), value, colId, type, kvOffset);
}
return 0;
}
// the caller must make sure schema->flen has already been accounted for when appending to an SDataRow
static FORCE_INLINE int32_t tdGetColAppendLen(uint8_t rowType, const void *value, int8_t colType) {
int32_t len = 0;
if (IS_VAR_DATA_TYPE(colType)) {
len += varDataTLen(value);
if (rowType == SMEM_ROW_KV) {
len += sizeof(SColIdx);
}
} else {
if (rowType == SMEM_ROW_KV) {
len += TYPE_BYTES[colType];
len += sizeof(SColIdx);
}
}
return len;
}
typedef struct {
int16_t colId;
uint8_t colType;
char* colVal;
} SColInfo;
static FORCE_INLINE void setSColInfo(SColInfo* colInfo, int16_t colId, uint8_t colType, char* colVal) {
colInfo->colId = colId;
colInfo->colType = colType;
colInfo->colVal = colVal;
}
SMemRow mergeTwoMemRows(void *buffer, SMemRow row1, SMemRow row2, STSchema *pSchema1, STSchema *pSchema2);
// ----------------- Raw payload structure for row:
/* |<------------ Head ------------->|<----------- body of column data tuple ------------------->|
* | |<----------------- flen ------------->|<--- value part --->|
......@@ -607,4 +756,4 @@ static FORCE_INLINE char *payloadNextCol(char *pCol) { return (char *)POINTER_SH
}
#endif
#endif // _TD_DATA_FORMAT_H_
\ No newline at end of file
#endif // _TD_DATA_FORMAT_H_
......@@ -17,9 +17,10 @@
#include "talgo.h"
#include "tcoding.h"
#include "wchar.h"
#include "tarray.h"
static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, int limit1, SDataCols *src2, int *iter2,
int limit2, int tRows);
int limit2, int tRows, bool forceSetNull);
/**
* Duplicate the schema and return a new object
......@@ -418,7 +419,8 @@ void tdResetDataCols(SDataCols *pCols) {
}
}
}
static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols) {
static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols, bool forceSetNull) {
ASSERT(pCols->numOfRows == 0 || dataColsKeyLast(pCols) < dataRowKey(row));
int rcol = 0;
......@@ -452,8 +454,10 @@ static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols
} else if (pRowCol->colId < pDataCol->colId) {
rcol++;
} else {
// dataColSetNullAt(pDataCol, pCols->numOfRows);
dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
if(forceSetNull) {
//dataColSetNullAt(pDataCol, pCols->numOfRows);
dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
}
dcol++;
}
}
......@@ -461,7 +465,7 @@ static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols
pCols->numOfRows++;
}
static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCols) {
static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCols, bool forceSetNull) {
ASSERT(pCols->numOfRows == 0 || dataColsKeyLast(pCols) < kvRowKey(row));
int rcol = 0;
......@@ -498,8 +502,10 @@ static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCo
} else if (colIdx->colId < pDataCol->colId) {
++rcol;
} else {
// dataColSetNullAt(pDataCol, pCols->numOfRows);
dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
if (forceSetNull) {
// dataColSetNullAt(pDataCol, pCols->numOfRows);
dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
}
++dcol;
}
}
......@@ -507,17 +513,17 @@ static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCo
pCols->numOfRows++;
}
void tdAppendMemRowToDataCol(SMemRow row, STSchema *pSchema, SDataCols *pCols) {
void tdAppendMemRowToDataCol(SMemRow row, STSchema *pSchema, SDataCols *pCols, bool forceSetNull) {
if (isDataRow(row)) {
tdAppendDataRowToDataCol(memRowDataBody(row), pSchema, pCols);
tdAppendDataRowToDataCol(memRowDataBody(row), pSchema, pCols, forceSetNull);
} else if (isKvRow(row)) {
tdAppendKvRowToDataCol(memRowKvBody(row), pSchema, pCols);
tdAppendKvRowToDataCol(memRowKvBody(row), pSchema, pCols, forceSetNull);
} else {
ASSERT(0);
}
}
int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *pOffset) {
int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *pOffset, bool forceSetNull) {
ASSERT(rowsToMerge > 0 && rowsToMerge <= source->numOfRows);
ASSERT(target->numOfCols == source->numOfCols);
int offset = 0;
......@@ -546,7 +552,7 @@ int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *
int iter1 = 0;
tdMergeTwoDataCols(target, pTarget, &iter1, pTarget->numOfRows, source, pOffset, source->numOfRows,
pTarget->numOfRows + rowsToMerge);
pTarget->numOfRows + rowsToMerge, forceSetNull);
}
tdFreeDataCols(pTarget);
......@@ -559,7 +565,7 @@ _err:
// src2 data has more priority than src1
static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, int limit1, SDataCols *src2, int *iter2,
int limit2, int tRows) {
int limit2, int tRows, bool forceSetNull) {
tdResetDataCols(target);
ASSERT(limit1 <= src1->numOfRows && limit2 <= src2->numOfRows);
......@@ -588,7 +594,7 @@ static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, i
if ((key1 > key2) || (key1 == key2 && !TKEY_IS_DELETED(tkey2))) {
for (int i = 0; i < src2->numOfCols; i++) {
ASSERT(target->cols[i].type == src2->cols[i].type);
if (src2->cols[i].len > 0) {
if (src2->cols[i].len > 0 && (forceSetNull || (!forceSetNull && !isNull(src2->cols[i].pData, src2->cols[i].type)))) {
dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src2->cols + i, *iter2), target->numOfRows,
target->maxPoints);
}
......@@ -634,42 +640,28 @@ int tdSetKVRowDataOfCol(SKVRow *orow, int16_t colId, int8_t type, void *value) {
SKVRow nrow = NULL;
void * ptr = taosbsearch(&colId, kvRowColIdx(row), kvRowNCols(row), sizeof(SColIdx), comparTagId, TD_GE);
if (ptr == NULL || ((SColIdx *)ptr)->colId > colId) { // need to add a column value to the row
if (ptr == NULL || ((SColIdx *)ptr)->colId > colId) { // need to add a column value to the row
int diff = IS_VAR_DATA_TYPE(type) ? varDataTLen(value) : TYPE_BYTES[type];
nrow = malloc(kvRowLen(row) + sizeof(SColIdx) + diff);
int nRowLen = kvRowLen(row) + sizeof(SColIdx) + diff;
int oRowCols = kvRowNCols(row);
ASSERT(diff > 0);
nrow = malloc(nRowLen);
if (nrow == NULL) return -1;
kvRowSetLen(nrow, kvRowLen(row) + (uint16_t)sizeof(SColIdx) + diff);
kvRowSetNCols(nrow, kvRowNCols(row) + 1);
kvRowSetLen(nrow, nRowLen);
kvRowSetNCols(nrow, oRowCols + 1);
if (ptr == NULL) {
memcpy(kvRowColIdx(nrow), kvRowColIdx(row), sizeof(SColIdx) * kvRowNCols(row));
memcpy(kvRowValues(nrow), kvRowValues(row), POINTER_DISTANCE(kvRowEnd(row), kvRowValues(row)));
int colIdx = kvRowNCols(nrow) - 1;
kvRowColIdxAt(nrow, colIdx)->colId = colId;
kvRowColIdxAt(nrow, colIdx)->offset = (int16_t)(POINTER_DISTANCE(kvRowEnd(row), kvRowValues(row)));
memcpy(kvRowColVal(nrow, kvRowColIdxAt(nrow, colIdx)), value, diff);
} else {
int16_t tlen = (int16_t)(POINTER_DISTANCE(ptr, kvRowColIdx(row)));
if (tlen > 0) {
memcpy(kvRowColIdx(nrow), kvRowColIdx(row), tlen);
memcpy(kvRowValues(nrow), kvRowValues(row), ((SColIdx *)ptr)->offset);
}
memcpy(kvRowColIdx(nrow), kvRowColIdx(row), sizeof(SColIdx) * oRowCols);
memcpy(kvRowValues(nrow), kvRowValues(row), kvRowValLen(row));
int colIdx = tlen / sizeof(SColIdx);
kvRowColIdxAt(nrow, colIdx)->colId = colId;
kvRowColIdxAt(nrow, colIdx)->offset = ((SColIdx *)ptr)->offset;
memcpy(kvRowColVal(nrow, kvRowColIdxAt(nrow, colIdx)), value, diff);
pColIdx = kvRowColIdxAt(nrow, oRowCols);
pColIdx->colId = colId;
pColIdx->offset = kvRowValLen(row);
for (int i = colIdx; i < kvRowNCols(row); i++) {
kvRowColIdxAt(nrow, i + 1)->colId = kvRowColIdxAt(row, i)->colId;
kvRowColIdxAt(nrow, i + 1)->offset = kvRowColIdxAt(row, i)->offset + diff;
}
memcpy(kvRowColVal(nrow, kvRowColIdxAt(nrow, colIdx + 1)), kvRowColVal(row, kvRowColIdxAt(row, colIdx)),
POINTER_DISTANCE(kvRowEnd(row), kvRowColVal(row, kvRowColIdxAt(row, colIdx)))
memcpy(kvRowColVal(nrow, pColIdx), value, diff); // copy new value
);
}
tdSortKVRowByColIdx(nrow);
*orow = nrow;
free(row);
......@@ -680,9 +672,8 @@ int tdSetKVRowDataOfCol(SKVRow *orow, int16_t colId, int8_t type, void *value) {
if (varDataTLen(value) == varDataTLen(pOldVal)) { // just update the column value in place
memcpy(pOldVal, value, varDataTLen(value));
} else { // need to reallocate the memory
uint16_t diff = varDataTLen(value) - varDataTLen(pOldVal);
uint16_t nlen = kvRowLen(row) + diff;
} else { // need to reallocate the memory
int16_t nlen = kvRowLen(row) + (varDataTLen(value) - varDataTLen(pOldVal));
ASSERT(nlen > 0);
nrow = malloc(nlen);
if (nrow == NULL) return -1;
......@@ -690,30 +681,22 @@ int tdSetKVRowDataOfCol(SKVRow *orow, int16_t colId, int8_t type, void *value) {
kvRowSetLen(nrow, nlen);
kvRowSetNCols(nrow, kvRowNCols(row));
// Copy part ahead
nlen = (int16_t)(POINTER_DISTANCE(ptr, kvRowColIdx(row)));
ASSERT(nlen % sizeof(SColIdx) == 0);
if (nlen > 0) {
ASSERT(((SColIdx *)ptr)->offset > 0);
memcpy(kvRowColIdx(nrow), kvRowColIdx(row), nlen);
memcpy(kvRowValues(nrow), kvRowValues(row), ((SColIdx *)ptr)->offset);
int zsize = sizeof(SColIdx) * kvRowNCols(row) + ((SColIdx *)ptr)->offset;
memcpy(kvRowColIdx(nrow), kvRowColIdx(row), zsize);
memcpy(kvRowColVal(nrow, ((SColIdx *)ptr)), value, varDataTLen(value));
// Copy left value part
int lsize = kvRowLen(row) - TD_KV_ROW_HEAD_SIZE - zsize - varDataTLen(pOldVal);
if (lsize > 0) {
memcpy(POINTER_SHIFT(nrow, TD_KV_ROW_HEAD_SIZE + zsize + varDataTLen(value)),
POINTER_SHIFT(row, TD_KV_ROW_HEAD_SIZE + zsize + varDataTLen(pOldVal)), lsize);
}
// Construct current column value
int colIdx = nlen / sizeof(SColIdx);
pColIdx = kvRowColIdxAt(nrow, colIdx);
pColIdx->colId = ((SColIdx *)ptr)->colId;
pColIdx->offset = ((SColIdx *)ptr)->offset;
memcpy(kvRowColVal(nrow, pColIdx), value, varDataTLen(value));
// Construct columns after
if (kvRowNCols(nrow) - colIdx - 1 > 0) {
for (int i = colIdx + 1; i < kvRowNCols(nrow); i++) {
kvRowColIdxAt(nrow, i)->colId = kvRowColIdxAt(row, i)->colId;
kvRowColIdxAt(nrow, i)->offset = kvRowColIdxAt(row, i)->offset + diff;
for (int i = 0; i < kvRowNCols(nrow); i++) {
pColIdx = kvRowColIdxAt(nrow, i);
if (pColIdx->offset > ((SColIdx *)ptr)->offset) {
pColIdx->offset = pColIdx->offset - varDataTLen(pOldVal) + varDataTLen(value);
}
memcpy(kvRowColVal(nrow, kvRowColIdxAt(nrow, colIdx + 1)), kvRowColVal(row, kvRowColIdxAt(row, colIdx + 1)),
POINTER_DISTANCE(kvRowEnd(row), kvRowColVal(row, kvRowColIdxAt(row, colIdx + 1))));
}
*orow = nrow;
......@@ -784,4 +767,97 @@ SKVRow tdGetKVRowFromBuilder(SKVRowBuilder *pBuilder) {
memcpy(kvRowValues(row), pBuilder->buf, pBuilder->size);
return row;
}
\ No newline at end of file
}
SMemRow mergeTwoMemRows(void *buffer, SMemRow row1, SMemRow row2, STSchema *pSchema1, STSchema *pSchema2) {
#if 0
ASSERT(memRowKey(row1) == memRowKey(row2));
ASSERT(schemaVersion(pSchema1) == memRowVersion(row1));
ASSERT(schemaVersion(pSchema2) == memRowVersion(row2));
ASSERT(schemaVersion(pSchema1) >= schemaVersion(pSchema2));
#endif
SArray *stashRow = taosArrayInit(pSchema1->numOfCols, sizeof(SColInfo));
if (stashRow == NULL) {
return NULL;
}
SMemRow pRow = buffer;
SDataRow dataRow = memRowDataBody(pRow);
memRowSetType(pRow, SMEM_ROW_DATA);
dataRowSetVersion(dataRow, schemaVersion(pSchema1)); // use latest schema version
dataRowSetLen(dataRow, (TDRowLenT)(TD_DATA_ROW_HEAD_SIZE + pSchema1->flen));
TDRowTLenT dataLen = 0, kvLen = TD_MEM_ROW_KV_HEAD_SIZE;
int32_t i = 0; // row1
int32_t j = 0; // row2
int32_t nCols1 = schemaNCols(pSchema1);
int32_t nCols2 = schemaNCols(pSchema2);
SColInfo colInfo = {0};
int32_t kvIdx1 = 0, kvIdx2 = 0;
while (i < nCols1) {
STColumn *pCol = schemaColAt(pSchema1, i);
void * val1 = tdGetMemRowDataOfColEx(row1, pCol->colId, pCol->type, TD_DATA_ROW_HEAD_SIZE + pCol->offset, &kvIdx1);
// if val1 != NULL, use val1;
if (val1 != NULL && !isNull(val1, pCol->type)) {
tdAppendColVal(dataRow, val1, pCol->type, pCol->offset);
kvLen += tdGetColAppendLen(SMEM_ROW_KV, val1, pCol->type);
setSColInfo(&colInfo, pCol->colId, pCol->type, val1);
taosArrayPush(stashRow, &colInfo);
++i; // next col
continue;
}
void *val2 = NULL;
while (j < nCols2) {
STColumn *tCol = schemaColAt(pSchema2, j);
if (tCol->colId < pCol->colId) {
++j;
continue;
}
if (tCol->colId == pCol->colId) {
val2 = tdGetMemRowDataOfColEx(row2, tCol->colId, tCol->type, TD_DATA_ROW_HEAD_SIZE + tCol->offset, &kvIdx2);
} else if (tCol->colId > pCol->colId) {
// set NULL
}
break;
} // end of while(j<nCols2)
if (val2 == NULL) {
val2 = (void *)getNullValue(pCol->type);
}
tdAppendColVal(dataRow, val2, pCol->type, pCol->offset);
if (!isNull(val2, pCol->type)) {
kvLen += tdGetColAppendLen(SMEM_ROW_KV, val2, pCol->type);
setSColInfo(&colInfo, pCol->colId, pCol->type, val2);
taosArrayPush(stashRow, &colInfo);
}
++i; // next col
}
dataLen = memRowTLen(pRow);
if (kvLen < dataLen) {
// scan stashRow and generate SKVRow
memset(buffer, 0, dataLen);  // clear the whole row buffer (dataLen bytes), not just sizeof(dataLen)
SMemRow tRow = buffer;
memRowSetType(tRow, SMEM_ROW_KV);
SKVRow kvRow = (SKVRow)memRowKvBody(tRow);
int16_t nKvNCols = (int16_t) taosArrayGetSize(stashRow);
kvRowSetLen(kvRow, (TDRowLenT)(TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * nKvNCols));
kvRowSetNCols(kvRow, nKvNCols);
memRowSetKvVersion(tRow, pSchema1->version);
int32_t toffset = 0;
int16_t k;
for (k = 0; k < nKvNCols; ++k) {
SColInfo *pColInfo = taosArrayGet(stashRow, k);
tdAppendKvColVal(kvRow, pColInfo->colVal, pColInfo->colId, pColInfo->colType, &toffset);
}
ASSERT(kvLen == memRowTLen(tRow));
}
taosArrayDestroy(stashRow);
return buffer;
}
......@@ -409,7 +409,7 @@ bool isValidDataType(int32_t type) {
return type >= TSDB_DATA_TYPE_NULL && type <= TSDB_DATA_TYPE_UBIGINT;
}
void setVardataNull(char* val, int32_t type) {
void setVardataNull(void* val, int32_t type) {
if (type == TSDB_DATA_TYPE_BINARY) {
varDataSetLen(val, sizeof(int8_t));
*(uint8_t*) varDataVal(val) = TSDB_DATA_BINARY_NULL;
......@@ -421,75 +421,75 @@ void setVardataNull(char* val, int32_t type) {
}
}
void setNull(char *val, int32_t type, int32_t bytes) { setNullN(val, type, bytes, 1); }
void setNull(void *val, int32_t type, int32_t bytes) { setNullN(val, type, bytes, 1); }
void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems) {
void setNullN(void *val, int32_t type, int32_t bytes, int32_t numOfElems) {
switch (type) {
case TSDB_DATA_TYPE_BOOL:
for (int32_t i = 0; i < numOfElems; ++i) {
*(uint8_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_BOOL_NULL;
*(uint8_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_BOOL_NULL;
}
break;
case TSDB_DATA_TYPE_TINYINT:
for (int32_t i = 0; i < numOfElems; ++i) {
*(uint8_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_TINYINT_NULL;
*(uint8_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_TINYINT_NULL;
}
break;
case TSDB_DATA_TYPE_SMALLINT:
for (int32_t i = 0; i < numOfElems; ++i) {
*(uint16_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_SMALLINT_NULL;
*(uint16_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_SMALLINT_NULL;
}
break;
case TSDB_DATA_TYPE_INT:
for (int32_t i = 0; i < numOfElems; ++i) {
*(uint32_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_INT_NULL;
*(uint32_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_INT_NULL;
}
break;
case TSDB_DATA_TYPE_BIGINT:
case TSDB_DATA_TYPE_TIMESTAMP:
for (int32_t i = 0; i < numOfElems; ++i) {
*(uint64_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_BIGINT_NULL;
*(uint64_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_BIGINT_NULL;
}
break;
case TSDB_DATA_TYPE_UTINYINT:
for (int32_t i = 0; i < numOfElems; ++i) {
*(uint8_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_UTINYINT_NULL;
*(uint8_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_UTINYINT_NULL;
}
break;
case TSDB_DATA_TYPE_USMALLINT:
for (int32_t i = 0; i < numOfElems; ++i) {
*(uint16_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_USMALLINT_NULL;
*(uint16_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_USMALLINT_NULL;
}
break;
case TSDB_DATA_TYPE_UINT:
for (int32_t i = 0; i < numOfElems; ++i) {
*(uint32_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_UINT_NULL;
*(uint32_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_UINT_NULL;
}
break;
case TSDB_DATA_TYPE_UBIGINT:
for (int32_t i = 0; i < numOfElems; ++i) {
*(uint64_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_UBIGINT_NULL;
*(uint64_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_UBIGINT_NULL;
}
break;
case TSDB_DATA_TYPE_FLOAT:
for (int32_t i = 0; i < numOfElems; ++i) {
*(uint32_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_FLOAT_NULL;
*(uint32_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_FLOAT_NULL;
}
break;
case TSDB_DATA_TYPE_DOUBLE:
for (int32_t i = 0; i < numOfElems; ++i) {
*(uint64_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_DOUBLE_NULL;
*(uint64_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_DOUBLE_NULL;
}
break;
case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_BINARY:
for (int32_t i = 0; i < numOfElems; ++i) {
setVardataNull(val + i * bytes, type);
setVardataNull(POINTER_SHIFT(val, i * bytes), type);
}
break;
default: {
for (int32_t i = 0; i < numOfElems; ++i) {
*(uint32_t *)(val + i * tDataTypes[TSDB_DATA_TYPE_INT].bytes) = TSDB_DATA_INT_NULL;
*(uint32_t *)(POINTER_SHIFT(val, i * tDataTypes[TSDB_DATA_TYPE_INT].bytes)) = TSDB_DATA_INT_NULL;
}
break;
}
......
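
These bodies switch from raw pointer arithmetic to POINTER_SHIFT because the parameters are now void *, on which standard C forbids arithmetic; a minimal illustration, assuming the usual definition of the macro:

```c
/* Why POINTER_SHIFT: standard C does not allow arithmetic on void *.
 * The macro (assumed to be defined roughly as below elsewhere in the tree)
 * casts to char * so the byte offset is explicit and portable. */
#include <stddef.h>

#define POINTER_SHIFT(p, b) ((void *)((char *)(p) + (b)))

static void *nthElement(void *base, size_t elemBytes, size_t i) {
  return POINTER_SHIFT(base, i * elemBytes);  /* instead of base + i * elemBytes */
}
```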
......@@ -163,5 +163,8 @@ namespace TDengineDriver
[DllImport("taos", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)]
static extern public int Close(IntPtr taos);
//get the precision of the result set
[DllImport("taos", EntryPoint = "taos_result_precision", CallingConvention = CallingConvention.Cdecl)]
static extern public int ResultPrecision(IntPtr taos);
}
}
......@@ -307,7 +307,7 @@ do { \
#define TSDB_DEFAULT_WAL_LEVEL 1
#define TSDB_MIN_DB_UPDATE 0
#define TSDB_MAX_DB_UPDATE 1
#define TSDB_MAX_DB_UPDATE 2
#define TSDB_DEFAULT_DB_UPDATE_OPTION 0
#define TSDB_MIN_DB_CACHE_LAST_ROW 0
......@@ -436,6 +436,12 @@ typedef enum {
TSDB_CHECK_ITEM_MAX
} ECheckItemType;
typedef enum {
TD_ROW_DISCARD_UPDATE = 0,
TD_ROW_OVERWRITE_UPDATE = 1,
TD_ROW_PARTIAL_UPDATE = 2
} TDUpdateConfig;
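
Elsewhere in this commit the new enum is consumed by collapsing the mode into a forceSetNull flag for the merge helpers; a minimal sketch of that mapping (the helper name is hypothetical):

```c
/* Sketch of how the update mode maps to the forceSetNull flag passed to
 * tdMergeDataCols()/tdAppendMemRowToDataCol() in this commit; the helper
 * name is hypothetical. */
#include <stdbool.h>
#include <stdint.h>

static inline bool tdUpdateModeForceSetNull(int8_t update) {
  /* partial update keeps the existing value for columns the new row leaves
   * unset; discard/overwrite modes write the new (possibly NULL) value */
  return update != TD_ROW_PARTIAL_UPDATE;
}
```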
extern char *qtypeStr[];
#ifdef __cplusplus
......
......@@ -111,7 +111,7 @@ typedef struct {
uint64_t superUid;
STSchema * schema;
STSchema * tagSchema;
SDataRow tagValues;
SKVRow tagValues;
char * sql;
} STableCfg;
......
......@@ -141,7 +141,7 @@ typedef struct {
#define IS_VALID_FLOAT(_t) ((_t) >= -FLT_MAX && (_t) <= FLT_MAX)
#define IS_VALID_DOUBLE(_t) ((_t) >= -DBL_MAX && (_t) <= DBL_MAX)
static FORCE_INLINE bool isNull(const char *val, int32_t type) {
static FORCE_INLINE bool isNull(const void *val, int32_t type) {
switch (type) {
case TSDB_DATA_TYPE_BOOL:
return *(uint8_t *)val == TSDB_DATA_BOOL_NULL;
......@@ -193,9 +193,9 @@ extern tDataTypeDescriptor tDataTypes[15];
bool isValidDataType(int32_t type);
void setVardataNull(char* val, int32_t type);
void setNull(char *val, int32_t type, int32_t bytes);
void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems);
void setVardataNull(void* val, int32_t type);
void setNull(void *val, int32_t type, int32_t bytes);
void setNullN(void *val, int32_t type, int32_t bytes, int32_t numOfElems);
const void *getNullValue(int32_t type);
void assignVal(char *val, const char *src, int32_t len, int32_t type);
......
......@@ -92,7 +92,6 @@ extern char configDir[];
#define MAX_SUPER_TABLE_COUNT 200
#define MAX_QUERY_SQL_COUNT 100
#define MAX_QUERY_SQL_LENGTH BUFFER_SIZE
#define MAX_DATABASE_COUNT 256
#define INPUT_BUF_LEN 256
......@@ -383,7 +382,7 @@ typedef struct SpecifiedQueryInfo_S {
uint64_t queryTimes;
bool subscribeRestart;
int subscribeKeepProgress;
char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1];
char sql[MAX_QUERY_SQL_COUNT][BUFFER_SIZE+1];
char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN];
int resubAfterConsume[MAX_QUERY_SQL_COUNT];
int endAfterConsume[MAX_QUERY_SQL_COUNT];
......@@ -406,7 +405,7 @@ typedef struct SuperQueryInfo_S {
int64_t childTblCount;
char childTblPrefix[TBNAME_PREFIX_LEN]; // 20 characters reserved for seq
int sqlCount;
char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1];
char sql[MAX_QUERY_SQL_COUNT][BUFFER_SIZE+1];
char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN];
int resubAfterConsume;
int endAfterConsume;
......@@ -1253,14 +1252,14 @@ static void fetchResult(TAOS_RES *res, threadInfo* pThreadInfo) {
// fetch the records row by row
while((row = taos_fetch_row(res))) {
if (totalLen >= 100*1024*1024 - 32000) {
if (totalLen >= (100*1024*1024 - HEAD_BUFF_LEN*2)) {
if (strlen(pThreadInfo->filePath) > 0)
appendResultBufToFile(databuf, pThreadInfo);
totalLen = 0;
memset(databuf, 0, 100*1024*1024);
}
num_rows++;
char temp[16000] = {0};
char temp[HEAD_BUFF_LEN] = {0};
int len = taos_print_row(temp, row, fields, num_fields);
len += sprintf(temp + len, "\n");
//printf("query result:%s\n", temp);
......@@ -2165,15 +2164,15 @@ static void printfDbInfoForQueryToFile(
}
static void printfQuerySystemInfo(TAOS * taos) {
char filename[MAX_QUERY_SQL_LENGTH+1] = {0};
char buffer[MAX_QUERY_SQL_LENGTH+1] = {0};
char filename[BUFFER_SIZE+1] = {0};
char buffer[BUFFER_SIZE+1] = {0};
TAOS_RES* res;
time_t t;
struct tm* lt;
time(&t);
lt = localtime(&t);
snprintf(filename, MAX_QUERY_SQL_LENGTH, "querySystemInfo-%d-%d-%d %d:%d:%d",
snprintf(filename, BUFFER_SIZE, "querySystemInfo-%d-%d-%d %d:%d:%d",
lt->tm_year+1900, lt->tm_mon, lt->tm_mday, lt->tm_hour, lt->tm_min,
lt->tm_sec);
......@@ -2205,12 +2204,12 @@ static void printfQuerySystemInfo(TAOS * taos) {
printfDbInfoForQueryToFile(filename, dbInfos[i], i);
// show db.vgroups
snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.vgroups;", dbInfos[i]->name);
snprintf(buffer, BUFFER_SIZE, "show %s.vgroups;", dbInfos[i]->name);
res = taos_query(taos, buffer);
xDumpResultToFile(filename, res);
// show db.stables
snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.stables;", dbInfos[i]->name);
snprintf(buffer, BUFFER_SIZE, "show %s.stables;", dbInfos[i]->name);
res = taos_query(taos, buffer);
xDumpResultToFile(filename, res);
free(dbInfos[i]);
......@@ -4549,7 +4548,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
goto PARSE_OVER;
}
tstrncpy(g_queryInfo.specifiedQueryInfo.sql[j],
sqlStr->valuestring, MAX_QUERY_SQL_LENGTH);
sqlStr->valuestring, BUFFER_SIZE);
// default value is -1, which mean infinite loop
g_queryInfo.specifiedQueryInfo.endAfterConsume[j] = -1;
......@@ -4771,7 +4770,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
goto PARSE_OVER;
}
tstrncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring,
MAX_QUERY_SQL_LENGTH);
BUFFER_SIZE);
cJSON *result = cJSON_GetObjectItem(sql, "result");
if (result != NULL && result->type == cJSON_String
......@@ -7425,14 +7424,14 @@ static void replaceChildTblName(char* inSql, char* outSql, int tblIndex) {
tstrncpy(outSql, inSql, pos - inSql + 1);
//printf("1: %s\n", outSql);
strncat(outSql, subTblName, MAX_QUERY_SQL_LENGTH - 1);
strncat(outSql, subTblName, BUFFER_SIZE - 1);
//printf("2: %s\n", outSql);
strncat(outSql, pos+strlen(sourceString), MAX_QUERY_SQL_LENGTH - 1);
strncat(outSql, pos+strlen(sourceString), BUFFER_SIZE - 1);
//printf("3: %s\n", outSql);
}
static void *superTableQuery(void *sarg) {
char sqlstr[MAX_QUERY_SQL_LENGTH];
char sqlstr[BUFFER_SIZE];
threadInfo *pThreadInfo = (threadInfo *)sarg;
setThreadName("superTableQuery");
......@@ -7735,7 +7734,7 @@ static TAOS_SUB* subscribeImpl(
static void *superSubscribe(void *sarg) {
threadInfo *pThreadInfo = (threadInfo *)sarg;
char subSqlstr[MAX_QUERY_SQL_LENGTH];
char subSqlstr[BUFFER_SIZE];
TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0};
uint64_t tsubSeq;
......
......@@ -184,12 +184,12 @@ static void getNextTimeWindow(SQueryAttr* pQueryAttr, STimeWindow* tw) {
int mon = (int)(tm.tm_year * 12 + tm.tm_mon + interval * factor);
tm.tm_year = mon / 12;
tm.tm_mon = mon % 12;
tw->skey = convertTimePrecision(mktime(&tm) * 1000L, TSDB_TIME_PRECISION_MILLI, pQueryAttr->precision);
tw->skey = convertTimePrecision((int64_t)mktime(&tm) * 1000L, TSDB_TIME_PRECISION_MILLI, pQueryAttr->precision);
mon = (int)(mon + interval);
tm.tm_year = mon / 12;
tm.tm_mon = mon % 12;
tw->ekey = convertTimePrecision(mktime(&tm) * 1000L, TSDB_TIME_PRECISION_MILLI, pQueryAttr->precision);
tw->ekey = convertTimePrecision((int64_t)mktime(&tm) * 1000L, TSDB_TIME_PRECISION_MILLI, pQueryAttr->precision);
tw->ekey -= 1;
}
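
The added (int64_t) cast matters on platforms where time_t and long are 32-bit: without it the multiplication by 1000 is evaluated in 32-bit arithmetic and wraps around. A self-contained illustration of the safe pattern:

```c
/* Why the (int64_t) cast: where time_t/long are 32 bits, mktime(&tm) * 1000L
 * would be computed in 32-bit arithmetic and overflow for any time later
 * than roughly 25 days after the Unix epoch. */
#include <stdint.h>
#include <time.h>

static int64_t secondsToMs(time_t sec) {
  return (int64_t)sec * 1000L;  /* widen first, then multiply */
}
```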
......
......@@ -40,7 +40,7 @@ void tsdbFreeBufPool(STsdbBufPool* pBufPool);
int tsdbOpenBufPool(STsdbRepo* pRepo);
void tsdbCloseBufPool(STsdbRepo* pRepo);
SListNode* tsdbAllocBufBlockFromPool(STsdbRepo* pRepo);
int tsdbExpendPool(STsdbRepo* pRepo, int32_t oldTotalBlocks);
int tsdbExpandPool(STsdbRepo* pRepo, int32_t oldTotalBlocks);
void tsdbRecycleBufferBlock(STsdbBufPool* pPool, SListNode *pNode);
#endif /* _TD_TSDB_BUFFER_H_ */
\ No newline at end of file
#endif /* _TD_TSDB_BUFFER_H_ */
......@@ -16,6 +16,13 @@
#ifndef _TD_TSDB_READ_IMPL_H_
#define _TD_TSDB_READ_IMPL_H_
#include "tfs.h"
#include "tsdb.h"
#include "os.h"
#include "tsdbFile.h"
#include "tskiplist.h"
#include "tsdbMeta.h"
typedef struct SReadH SReadH;
typedef struct {
......@@ -143,4 +150,4 @@ static FORCE_INLINE int tsdbMakeRoom(void **ppBuf, size_t size) {
return 0;
}
#endif /*_TD_TSDB_READ_IMPL_H_*/
\ No newline at end of file
#endif /*_TD_TSDB_READ_IMPL_H_*/
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TSDB_ROW_MERGE_BUF_H
#define TSDB_ROW_MERGE_BUF_H
#ifdef __cplusplus
extern "C" {
#endif
#include "tsdb.h"
#include "tchecksum.h"
#include "tsdbReadImpl.h"
typedef void* SMergeBuf;
SDataRow tsdbMergeTwoRows(SMergeBuf *pBuf, SMemRow row1, SMemRow row2, STSchema *pSchema1, STSchema *pSchema2);
static FORCE_INLINE int tsdbMergeBufMakeSureRoom(SMergeBuf *pBuf, STSchema* pSchema1, STSchema* pSchema2) {
return tsdbMakeRoom(pBuf, MAX(dataRowMaxBytesFromSchema(pSchema1), dataRowMaxBytesFromSchema(pSchema2)));
}
static FORCE_INLINE void tsdbFreeMergeBuf(SMergeBuf buf) {
taosTZfree(buf);
}
#ifdef __cplusplus
}
#endif
#endif /* ifndef TSDB_ROW_MERGE_BUF_H */
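
A hypothetical usage sketch tying the two declarations above together (it mirrors tsdbInsertDupKeyMerge added to tsdbMemTable.c later in this commit): grow the merge buffer to the worst-case row size, then merge the duplicate-key rows, with row1 taking priority.

```c
/* Hypothetical usage sketch (mirrors tsdbInsertDupKeyMerge in tsdbMemTable.c):
 * make sure the merge buffer can hold the worst-case row, then merge the two
 * versions of a duplicate-key row; row1 has the higher priority. */
static FORCE_INLINE SMemRow exampleMergeDupRows(SMergeBuf *pBuf, SMemRow row1, SMemRow row2,
                                                STSchema *pSchema1, STSchema *pSchema2) {
  if (tsdbMergeBufMakeSureRoom(pBuf, pSchema1, pSchema2) < 0) return NULL;
  return tsdbMergeTwoRows(pBuf, row1, row2, pSchema1, pSchema2);
}
```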
......@@ -68,6 +68,8 @@ extern "C" {
#include "tsdbCompact.h"
// Commit Queue
#include "tsdbCommitQueue.h"
#include "tsdbRowMergeBuf.h"
// Main definitions
struct STsdbRepo {
uint8_t state;
......@@ -92,6 +94,7 @@ struct STsdbRepo {
pthread_mutex_t mutex;
bool repoLocked;
int32_t code; // Commit code
SMergeBuf mergeBuf; //used when update=2
bool inCompact; // is in compact process?
};
......@@ -139,4 +142,4 @@ static FORCE_INLINE int tsdbGetNextMaxTables(int tid) {
}
#endif
#endif /* _TD_TSDB_INT_H_ */
\ No newline at end of file
#endif /* _TD_TSDB_INT_H_ */
......@@ -159,7 +159,7 @@ _err:
static void tsdbFreeBufBlock(STsdbBufBlock *pBufBlock) { tfree(pBufBlock); }
int tsdbExpendPool(STsdbRepo* pRepo, int32_t oldTotalBlocks) {
int tsdbExpandPool(STsdbRepo* pRepo, int32_t oldTotalBlocks) {
if (oldTotalBlocks == pRepo->config.totalBlocks) {
return TSDB_CODE_SUCCESS;
}
......@@ -199,4 +199,4 @@ void tsdbRecycleBufferBlock(STsdbBufPool* pPool, SListNode *pNode) {
tsdbFreeBufBlock(pBufBlock);
free(pNode);
pPool->nBufBlocks--;
}
\ No newline at end of file
}
......@@ -1290,7 +1290,7 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt
ASSERT(pSchema != NULL);
}
tdAppendMemRowToDataCol(row, pSchema, pTarget);
tdAppendMemRowToDataCol(row, pSchema, pTarget, true);
}
tSkipListIterNext(pCommitIter->pIter);
......@@ -1302,7 +1302,7 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt
ASSERT(pSchema != NULL);
}
tdAppendMemRowToDataCol(row, pSchema, pTarget);
tdAppendMemRowToDataCol(row, pSchema, pTarget, update == TD_ROW_OVERWRITE_UPDATE);
}
} else {
ASSERT(!isRowDel);
......
......@@ -138,7 +138,7 @@ static void tsdbApplyRepoConfig(STsdbRepo *pRepo) {
pSaveCfg->compression, pSaveCfg->keep,pSaveCfg->keep1, pSaveCfg->keep2,
pSaveCfg->totalBlocks, oldCfg.cacheLastRow, pSaveCfg->cacheLastRow, oldTotalBlocks, pSaveCfg->totalBlocks);
int err = tsdbExpendPool(pRepo, oldTotalBlocks);
int err = tsdbExpandPool(pRepo, oldTotalBlocks);
if (!TAOS_SUCCEEDED(err)) {
tsdbError("vgId:%d expand pool from %d to %d fail,reason:%s",
REPO_ID(pRepo), oldTotalBlocks, pSaveCfg->totalBlocks, tstrerror(err));
......
......@@ -455,7 +455,7 @@ static int tsdbCompactMeta(STsdbRepo *pRepo) {
if (pReadh->pDCols[0]->numOfRows - ridx == 0) break;
int rowsToMerge = MIN(pReadh->pDCols[0]->numOfRows - ridx, defaultRows - pComph->pDataCols->numOfRows);
tdMergeDataCols(pComph->pDataCols, pReadh->pDCols[0], rowsToMerge, &ridx);
tdMergeDataCols(pComph->pDataCols, pReadh->pDCols[0], rowsToMerge, &ridx, pCfg->update != TD_ROW_PARTIAL_UPDATE);
if (pComph->pDataCols->numOfRows < defaultRows) {
break;
......
......@@ -14,6 +14,7 @@
*/
// no test file errors here
#include "taosdef.h"
#include "tsdbint.h"
#define IS_VALID_PRECISION(precision) \
......@@ -106,6 +107,8 @@ STsdbRepo *tsdbOpenRepo(STsdbCfg *pCfg, STsdbAppH *pAppH) {
return NULL;
}
pRepo->mergeBuf = NULL;
tsdbStartStream(pRepo);
tsdbDebug("vgId:%d, TSDB repository opened", REPO_ID(pRepo));
......@@ -518,7 +521,8 @@ static int32_t tsdbCheckAndSetDefaultCfg(STsdbCfg *pCfg) {
}
// update check
if (pCfg->update != 0) pCfg->update = 1;
if (pCfg->update < TD_ROW_DISCARD_UPDATE || pCfg->update > TD_ROW_PARTIAL_UPDATE)
pCfg->update = TD_ROW_DISCARD_UPDATE;
// update cacheLastRow
if (pCfg->cacheLastRow != 0) {
......@@ -597,6 +601,7 @@ static void tsdbFreeRepo(STsdbRepo *pRepo) {
tsdbFreeFS(pRepo->fs);
tsdbFreeBufPool(pRepo->pPool);
tsdbFreeMeta(pRepo->tsdbMeta);
tsdbFreeMergeBuf(pRepo->mergeBuf);
// tsdbFreeMemTable(pRepo->mem);
// tsdbFreeMemTable(pRepo->imem);
tsem_destroy(&(pRepo->readyToCommit));
......
......@@ -13,7 +13,11 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "tdataformat.h"
#include "tfunctional.h"
#include "tsdbint.h"
#include "tskiplist.h"
#include "tsdbRowMergeBuf.h"
#define TSDB_DATA_SKIPLIST_LEVEL 5
#define TSDB_MAX_INSERT_BATCH 512
......@@ -30,24 +34,22 @@ typedef struct {
void * pMsg;
} SSubmitMsgIter;
static SMemTable * tsdbNewMemTable(STsdbRepo *pRepo);
static void tsdbFreeMemTable(SMemTable *pMemTable);
static STableData *tsdbNewTableData(STsdbCfg *pCfg, STable *pTable);
static void tsdbFreeTableData(STableData *pTableData);
static char * tsdbGetTsTupleKey(const void *data);
static SMemTable * tsdbNewMemTable(STsdbRepo *pRepo);
static void tsdbFreeMemTable(SMemTable *pMemTable);
static STableData* tsdbNewTableData(STsdbCfg *pCfg, STable *pTable);
static void tsdbFreeTableData(STableData *pTableData);
static char * tsdbGetTsTupleKey(const void *data);
static int tsdbAdjustMemMaxTables(SMemTable *pMemTable, int maxTables);
static int tsdbAppendTableRowToCols(STable *pTable, SDataCols *pCols, STSchema **ppSchema, SMemRow row);
static int tsdbAppendTableRowToCols(STable *pTable, SDataCols *pCols, STSchema **ppSchema, SMemRow row);
static int tsdbInitSubmitBlkIter(SSubmitBlk *pBlock, SSubmitBlkIter *pIter);
static SMemRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter);
static SMemRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter);
static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg);
static int tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, int32_t *affectedrows);
static int tsdbCopyRowToMem(STsdbRepo *pRepo, SMemRow row, STable *pTable, void **ppRow);
static int tsdbInitSubmitMsgIter(SSubmitMsg *pMsg, SSubmitMsgIter *pIter);
static int tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter, SSubmitBlk **pPBlock);
static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pTable);
static int tsdbInsertDataToTableImpl(STsdbRepo *pRepo, STable *pTable, void **rows, int rowCounter);
static void tsdbFreeRows(STsdbRepo *pRepo, void **rows, int rowCounter);
static int tsdbUpdateTableLatestInfo(STsdbRepo *pRepo, STable *pTable, SMemRow row);
static int tsdbUpdateTableLatestInfo(STsdbRepo *pRepo, STable *pTable, SMemRow row);
static FORCE_INLINE int tsdbCheckRowRange(STsdbRepo *pRepo, STable *pTable, SMemRow row, TSKEY minKey, TSKEY maxKey,
TSKEY now);
......@@ -342,7 +344,7 @@ int tsdbSyncCommit(STsdbRepo *repo) {
* 3. rowsIncreased = rowsInserted - rowsDeleteSucceed >= maxRowsToRead
* 4. operations in pCols not exceeds its max capacity if pCols is given
*
* The function tries to procceed AS MUSH AS POSSIBLE.
* The function tries to proceed AS MUCH AS POSSIBLE.
*/
int tsdbLoadDataFromCache(STable *pTable, SSkipListIterator *pIter, TSKEY maxKey, int maxRowsToRead, SDataCols *pCols,
TKEY *filterKeys, int nFilterKeys, bool keepDup, SMergeInfo *pMergeInfo) {
......@@ -523,9 +525,15 @@ static STableData *tsdbNewTableData(STsdbCfg *pCfg, STable *pTable) {
pTableData->keyLast = 0;
pTableData->numOfRows = 0;
uint8_t skipListCreateFlags;
if(pCfg->update == TD_ROW_DISCARD_UPDATE)
skipListCreateFlags = SL_DISCARD_DUP_KEY;
else
skipListCreateFlags = SL_UPDATE_DUP_KEY;
pTableData->pData =
tSkipListCreate(TSDB_DATA_SKIPLIST_LEVEL, TSDB_DATA_TYPE_TIMESTAMP, TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP],
tkeyComparFn, pCfg->update ? SL_UPDATE_DUP_KEY : SL_DISCARD_DUP_KEY, tsdbGetTsTupleKey);
tkeyComparFn, skipListCreateFlags, tsdbGetTsTupleKey);
if (pTableData->pData == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
free(pTableData);
......@@ -581,7 +589,7 @@ static int tsdbAppendTableRowToCols(STable *pTable, SDataCols *pCols, STSchema *
}
}
tdAppendMemRowToDataCol(row, *ppSchema, pCols);
tdAppendMemRowToDataCol(row, *ppSchema, pCols, true);
}
return 0;
......@@ -693,95 +701,162 @@ static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg) {
return 0;
}
static int tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, int32_t *affectedrows) {
STsdbMeta * pMeta = pRepo->tsdbMeta;
int64_t points = 0;
STable * pTable = NULL;
SSubmitBlkIter blkIter = {0};
SMemRow row = NULL;
void * rows[TSDB_MAX_INSERT_BATCH] = {0};
int rowCounter = 0;
//row1 has higher priority
static SMemRow tsdbInsertDupKeyMerge(SMemRow row1, SMemRow row2, STsdbRepo* pRepo, STSchema **ppSchema1, STSchema **ppSchema2, STable* pTable, int32_t* pAffectedRows, int64_t* pPoints, SMemRow* pLastRow) {
// for compatibility, a duplicate key inserted when update=0 should also be counted in the affected rows!
if(row1 == NULL && row2 == NULL && pRepo->config.update == TD_ROW_DISCARD_UPDATE) {
(*pAffectedRows)++;
(*pPoints)++;
return NULL;
}
ASSERT(pBlock->tid < pMeta->maxTables);
pTable = pMeta->tables[pBlock->tid];
ASSERT(pTable != NULL && TABLE_UID(pTable) == pBlock->uid);
if(row2 == NULL || pRepo->config.update != TD_ROW_PARTIAL_UPDATE) {
void* pMem = tsdbAllocBytes(pRepo, memRowTLen(row1));
if(pMem == NULL) return NULL;
memRowCpy(pMem, row1);
(*pAffectedRows)++;
(*pPoints)++;
*pLastRow = pMem;
return pMem;
}
tsdbInitSubmitBlkIter(pBlock, &blkIter);
while ((row = tsdbGetSubmitBlkNext(&blkIter)) != NULL) {
if (tsdbCopyRowToMem(pRepo, row, pTable, &(rows[rowCounter])) < 0) {
tsdbFreeRows(pRepo, rows, rowCounter);
goto _err;
STSchema *pSchema1 = *ppSchema1;
STSchema *pSchema2 = *ppSchema2;
SMergeBuf * pBuf = &pRepo->mergeBuf;
int dv1 = memRowVersion(row1);
int dv2 = memRowVersion(row2);
if(pSchema1 == NULL || schemaVersion(pSchema1) != dv1) {
if(pSchema2 != NULL && schemaVersion(pSchema2) == dv1) {
*ppSchema1 = pSchema2;
} else {
*ppSchema1 = tsdbGetTableSchemaImpl(pTable, false, false, memRowVersion(row1));
}
pSchema1 = *ppSchema1;
}
(*affectedrows)++;
points++;
if (rows[rowCounter] != NULL) {
rowCounter++;
if(pSchema2 == NULL || schemaVersion(pSchema2) != dv2) {
if(schemaVersion(pSchema1) == dv2) {
pSchema2 = pSchema1;
} else {
*ppSchema2 = tsdbGetTableSchemaImpl(pTable, false, false, memRowVersion(row2));
pSchema2 = *ppSchema2;
}
}
if (rowCounter == TSDB_MAX_INSERT_BATCH) {
if (tsdbInsertDataToTableImpl(pRepo, pTable, rows, rowCounter) < 0) {
goto _err;
}
SMemRow tmp = tsdbMergeTwoRows(pBuf, row1, row2, pSchema1, pSchema2);
rowCounter = 0;
memset(rows, 0, sizeof(rows));
}
}
void* pMem = tsdbAllocBytes(pRepo, memRowTLen(tmp));
if(pMem == NULL) return NULL;
memRowCpy(pMem, tmp);
if (rowCounter > 0 && tsdbInsertDataToTableImpl(pRepo, pTable, rows, rowCounter) < 0) {
goto _err;
}
(*pAffectedRows)++;
(*pPoints)++;
STSchema *pSchema = tsdbGetTableSchemaByVersion(pTable, pBlock->sversion);
pRepo->stat.pointsWritten += points * schemaNCols(pSchema);
pRepo->stat.totalStorage += points * schemaVLen(pSchema);
*pLastRow = pMem;
return pMem;
}
return 0;
static void* tsdbInsertDupKeyMergePacked(void** args) {
return tsdbInsertDupKeyMerge(args[0], args[1], args[2], (STSchema**)&args[3], (STSchema**)&args[4], args[5], args[6], args[7], args[8]);
}
_err:
return -1;
static void tsdbSetupSkipListHookFns(SSkipList* pSkipList, STsdbRepo *pRepo, STable *pTable, int32_t* pAffectedRows, int64_t* pPoints, SMemRow* pLastRow) {
if(pSkipList->insertHandleFn == NULL) {
tGenericSavedFunc *dupHandleSavedFunc = genericSavedFuncInit((GenericVaFunc)&tsdbInsertDupKeyMergePacked, 9);
dupHandleSavedFunc->args[2] = pRepo;
dupHandleSavedFunc->args[3] = NULL;
dupHandleSavedFunc->args[4] = NULL;
dupHandleSavedFunc->args[5] = pTable;
dupHandleSavedFunc->args[6] = pAffectedRows;
dupHandleSavedFunc->args[7] = pPoints;
dupHandleSavedFunc->args[8] = pLastRow;
pSkipList->insertHandleFn = dupHandleSavedFunc;
}
}
static int tsdbCopyRowToMem(STsdbRepo *pRepo, SMemRow row, STable *pTable, void **ppRow) {
STsdbCfg * pCfg = &pRepo->config;
TKEY tkey = memRowTKey(row);
TSKEY key = memRowKey(row);
bool isRowDelete = TKEY_IS_DELETED(tkey);
static int tsdbInsertDataToTable(STsdbRepo* pRepo, SSubmitBlk* pBlock, int32_t *pAffectedRows) {
if (isRowDelete) {
if (!pCfg->update) {
tsdbWarn("vgId:%d vnode is not allowed to update but try to delete a data row", REPO_ID(pRepo));
terrno = TSDB_CODE_TDB_INVALID_ACTION;
STsdbMeta *pMeta = pRepo->tsdbMeta;
int64_t points = 0;
STable *pTable = NULL;
SSubmitBlkIter blkIter = {0};
SMemTable *pMemTable = NULL;
STableData *pTableData = NULL;
STsdbCfg *pCfg = &(pRepo->config);
tsdbInitSubmitBlkIter(pBlock, &blkIter);
if(blkIter.row == NULL) return 0;
TSKEY firstRowKey = memRowKey(blkIter.row);
tsdbAllocBytes(pRepo, 0);
pMemTable = pRepo->mem;
ASSERT(pMemTable != NULL);
ASSERT(pBlock->tid < pMeta->maxTables);
pTable = pMeta->tables[pBlock->tid];
ASSERT(pTable != NULL && TABLE_UID(pTable) == pBlock->uid);
if (TABLE_TID(pTable) >= pMemTable->maxTables) {
if (tsdbAdjustMemMaxTables(pMemTable, pMeta->maxTables) < 0) {
return -1;
}
}
pTableData = pMemTable->tData[TABLE_TID(pTable)];
if (pTableData == NULL || pTableData->uid != TABLE_UID(pTable)) {
if (pTableData != NULL) {
taosWLockLatch(&(pMemTable->latch));
pMemTable->tData[TABLE_TID(pTable)] = NULL;
tsdbFreeTableData(pTableData);
taosWUnLockLatch(&(pMemTable->latch));
}
TSKEY lastKey = tsdbGetTableLastKeyImpl(pTable);
if (key > lastKey) {
tsdbTrace("vgId:%d skip to delete row key %" PRId64 " which is larger than table lastKey %" PRId64,
REPO_ID(pRepo), key, lastKey);
return 0;
pTableData = tsdbNewTableData(pCfg, pTable);
if (pTableData == NULL) {
tsdbError("vgId:%d failed to insert data to table %s uid %" PRId64 " tid %d since %s", REPO_ID(pRepo),
TABLE_CHAR_NAME(pTable), TABLE_UID(pTable), TABLE_TID(pTable), tstrerror(terrno));
return -1;
}
}
void *pRow = tsdbAllocBytes(pRepo, memRowTLen(row));
if (pRow == NULL) {
tsdbError("vgId:%d failed to insert row with key %" PRId64 " to table %s while allocate %" PRIu32 " bytes since %s",
REPO_ID(pRepo), key, TABLE_CHAR_NAME(pTable), memRowTLen(row), tstrerror(terrno));
return -1;
pRepo->mem->tData[TABLE_TID(pTable)] = pTableData;
}
memRowCpy(pRow, row);
ppRow[0] = pRow; // save the memory address of data rows
ASSERT((pTableData != NULL) && pTableData->uid == TABLE_UID(pTable));
SMemRow lastRow = NULL;
int64_t osize = SL_SIZE(pTableData->pData);
tsdbSetupSkipListHookFns(pTableData->pData, pRepo, pTable, pAffectedRows, &points, &lastRow);
tSkipListPutBatchByIter(pTableData->pData, &blkIter, (iter_next_fn_t)tsdbGetSubmitBlkNext);
int64_t dsize = SL_SIZE(pTableData->pData) - osize;
tsdbTrace("vgId:%d a row is %s table %s tid %d uid %" PRIu64 " key %" PRIu64, REPO_ID(pRepo),
isRowDelete ? "deleted from" : "updated in", TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable),
key);
if(lastRow != NULL) {
TSKEY lastRowKey = memRowKey(lastRow);
if (pMemTable->keyFirst > firstRowKey) pMemTable->keyFirst = firstRowKey;
pMemTable->numOfRows += dsize;
if (pTableData->keyFirst > firstRowKey) pTableData->keyFirst = firstRowKey;
pTableData->numOfRows += dsize;
if (pMemTable->keyLast < lastRowKey) pMemTable->keyLast = lastRowKey;
if (pTableData->keyLast < lastRowKey) pTableData->keyLast = lastRowKey;
if (tsdbUpdateTableLatestInfo(pRepo, pTable, lastRow) < 0) {
return -1;
}
}
STSchema *pSchema = tsdbGetTableSchemaByVersion(pTable, pBlock->sversion);
pRepo->stat.pointsWritten += points * schemaNCols(pSchema);
pRepo->stat.totalStorage += points * schemaVLen(pSchema);
return 0;
}
static int tsdbInitSubmitMsgIter(SSubmitMsg *pMsg, SSubmitMsgIter *pIter) {
if (pMsg == NULL) {
terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP;
......@@ -889,106 +964,6 @@ static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pT
return 0;
}
static int tsdbInsertDataToTableImpl(STsdbRepo *pRepo, STable *pTable, void **rows, int rowCounter) {
if (rowCounter < 1) return 0;
SMemTable * pMemTable = NULL;
STableData *pTableData = NULL;
STsdbMeta * pMeta = pRepo->tsdbMeta;
STsdbCfg * pCfg = &(pRepo->config);
ASSERT(pRepo->mem != NULL);
pMemTable = pRepo->mem;
if (TABLE_TID(pTable) >= pMemTable->maxTables) {
if (tsdbAdjustMemMaxTables(pMemTable, pMeta->maxTables) < 0) {
tsdbFreeRows(pRepo, rows, rowCounter);
return -1;
}
}
pTableData = pMemTable->tData[TABLE_TID(pTable)];
if (pTableData == NULL || pTableData->uid != TABLE_UID(pTable)) {
if (pTableData != NULL) {
taosWLockLatch(&(pMemTable->latch));
pMemTable->tData[TABLE_TID(pTable)] = NULL;
tsdbFreeTableData(pTableData);
taosWUnLockLatch(&(pMemTable->latch));
}
pTableData = tsdbNewTableData(pCfg, pTable);
if (pTableData == NULL) {
tsdbError("vgId:%d failed to insert data to table %s uid %" PRId64 " tid %d since %s", REPO_ID(pRepo),
TABLE_CHAR_NAME(pTable), TABLE_UID(pTable), TABLE_TID(pTable), tstrerror(terrno));
tsdbFreeRows(pRepo, rows, rowCounter);
return -1;
}
pRepo->mem->tData[TABLE_TID(pTable)] = pTableData;
}
ASSERT((pTableData != NULL) && pTableData->uid == TABLE_UID(pTable));
int64_t osize = SL_SIZE(pTableData->pData);
tSkipListPutBatch(pTableData->pData, rows, rowCounter);
int64_t dsize = SL_SIZE(pTableData->pData) - osize;
TSKEY keyFirstRow = memRowKey(rows[0]);
TSKEY keyLastRow = memRowKey(rows[rowCounter - 1]);
if (pMemTable->keyFirst > keyFirstRow) pMemTable->keyFirst = keyFirstRow;
if (pMemTable->keyLast < keyLastRow) pMemTable->keyLast = keyLastRow;
pMemTable->numOfRows += dsize;
if (pTableData->keyFirst > keyFirstRow) pTableData->keyFirst = keyFirstRow;
if (pTableData->keyLast < keyLastRow) pTableData->keyLast = keyLastRow;
pTableData->numOfRows += dsize;
// update table latest info
if (tsdbUpdateTableLatestInfo(pRepo, pTable, rows[rowCounter - 1]) < 0) {
return -1;
}
return 0;
}
static void tsdbFreeRows(STsdbRepo *pRepo, void **rows, int rowCounter) {
ASSERT(pRepo->mem != NULL);
STsdbBufPool *pBufPool = pRepo->pPool;
for (int i = rowCounter - 1; i >= 0; --i) {
SMemRow row = (SMemRow)rows[i];
int bytes = (int)memRowTLen(row);
if (pRepo->mem->extraBuffList == NULL) {
STsdbBufBlock *pBufBlock = tsdbGetCurrBufBlock(pRepo);
ASSERT(pBufBlock != NULL && pBufBlock->offset >= bytes);
pBufBlock->offset -= bytes;
pBufBlock->remain += bytes;
ASSERT(row == POINTER_SHIFT(pBufBlock->data, pBufBlock->offset));
tsdbTrace("vgId:%d free %d bytes to TSDB buffer pool, nBlocks %d offset %d remain %d", REPO_ID(pRepo), bytes,
listNEles(pRepo->mem->bufBlockList), pBufBlock->offset, pBufBlock->remain);
if (pBufBlock->offset == 0) { // return the block to buffer pool
if (tsdbLockRepo(pRepo) < 0) return;
SListNode *pNode = tdListPopTail(pRepo->mem->bufBlockList);
tdListPrependNode(pBufPool->bufBlockList, pNode);
if (tsdbUnlockRepo(pRepo) < 0) return;
}
} else {
ASSERT(listNEles(pRepo->mem->extraBuffList) > 0);
SListNode *pNode = tdListPopTail(pRepo->mem->extraBuffList);
ASSERT(row == pNode->data);
free(pNode);
tsdbTrace("vgId:%d free %d bytes to SYSTEM buffer pool", REPO_ID(pRepo), bytes);
if (listNEles(pRepo->mem->extraBuffList) == 0) {
tdListFree(pRepo->mem->extraBuffList);
pRepo->mem->extraBuffList = NULL;
}
}
}
}
static void updateTableLatestColumn(STsdbRepo *pRepo, STable *pTable, SMemRow row) {
tsdbDebug("vgId:%d updateTableLatestColumn, %s row version:%d", REPO_ID(pRepo), pTable->name->data,
......@@ -1005,8 +980,8 @@ static void updateTableLatestColumn(STsdbRepo *pRepo, STable *pTable, SMemRow ro
}
SDataCol *pLatestCols = pTable->lastCols;
int32_t kvIdx = 0;
bool isDataRow = isDataRow(row);
for (int16_t j = 0; j < schemaNCols(pSchema); j++) {
STColumn *pTCol = schemaColAt(pSchema, j);
// ignore not exist colId
......@@ -1017,16 +992,8 @@ static void updateTableLatestColumn(STsdbRepo *pRepo, STable *pTable, SMemRow ro
void *value = NULL;
if (isDataRow) {
value = tdGetRowDataOfCol(memRowDataBody(row), (int8_t)pTCol->type,
TD_DATA_ROW_HEAD_SIZE + pTCol->offset);
} else {
// SKVRow
SColIdx *pColIdx = tdGetKVRowIdxOfCol(memRowKvBody(row), pTCol->colId);
if (pColIdx) {
value = tdGetKvRowDataOfCol(memRowKvBody(row), pColIdx->offset);
}
}
value = tdGetMemRowDataOfColEx(row, pTCol->colId, (int8_t)pTCol->type,
TD_DATA_ROW_HEAD_SIZE + pSchema->columns[j].offset, &kvIdx);
if ((value == NULL) || isNull(value, pTCol->type)) {
continue;
......@@ -1055,13 +1022,14 @@ static int tsdbUpdateTableLatestInfo(STsdbRepo *pRepo, STable *pTable, SMemRow r
// if cacheLastRow config has been reset, free the lastRow
if (!pCfg->cacheLastRow && pTable->lastRow != NULL) {
taosTZfree(pTable->lastRow);
SMemRow cachedLastRow = pTable->lastRow;
TSDB_WLOCK_TABLE(pTable);
pTable->lastRow = NULL;
TSDB_WUNLOCK_TABLE(pTable);
taosTZfree(cachedLastRow);
}
if (tsdbGetTableLastKeyImpl(pTable) < memRowKey(row)) {
if (tsdbGetTableLastKeyImpl(pTable) <= memRowKey(row)) {
if (CACHE_LAST_ROW(pCfg) || pTable->lastRow != NULL) {
SMemRow nrow = pTable->lastRow;
if (taosTSizeof(nrow) < memRowTLen(row)) {
......
(This diff has been collapsed.)
......@@ -244,6 +244,7 @@ int tsdbLoadBlockInfo(SReadH *pReadh, void *pTarget) {
int tsdbLoadBlockData(SReadH *pReadh, SBlock *pBlock, SBlockInfo *pBlkInfo) {
ASSERT(pBlock->numOfSubBlocks > 0);
int8_t update = pReadh->pRepo->config.update;
SBlock *iBlock = pBlock;
if (pBlock->numOfSubBlocks > 1) {
......@@ -258,7 +259,7 @@ int tsdbLoadBlockData(SReadH *pReadh, SBlock *pBlock, SBlockInfo *pBlkInfo) {
for (int i = 1; i < pBlock->numOfSubBlocks; i++) {
iBlock++;
if (tsdbLoadBlockDataImpl(pReadh, iBlock, pReadh->pDCols[1]) < 0) return -1;
if (tdMergeDataCols(pReadh->pDCols[0], pReadh->pDCols[1], pReadh->pDCols[1]->numOfRows, NULL) < 0) return -1;
if (tdMergeDataCols(pReadh->pDCols[0], pReadh->pDCols[1], pReadh->pDCols[1]->numOfRows, NULL, update != TD_ROW_PARTIAL_UPDATE) < 0) return -1;
}
ASSERT(pReadh->pDCols[0]->numOfRows == pBlock->numOfRows);
......@@ -270,6 +271,7 @@ int tsdbLoadBlockData(SReadH *pReadh, SBlock *pBlock, SBlockInfo *pBlkInfo) {
int tsdbLoadBlockDataCols(SReadH *pReadh, SBlock *pBlock, SBlockInfo *pBlkInfo, int16_t *colIds, int numOfColsIds) {
ASSERT(pBlock->numOfSubBlocks > 0);
int8_t update = pReadh->pRepo->config.update;
SBlock *iBlock = pBlock;
if (pBlock->numOfSubBlocks > 1) {
......@@ -284,7 +286,7 @@ int tsdbLoadBlockDataCols(SReadH *pReadh, SBlock *pBlock, SBlockInfo *pBlkInfo,
for (int i = 1; i < pBlock->numOfSubBlocks; i++) {
iBlock++;
if (tsdbLoadBlockDataColsImpl(pReadh, iBlock, pReadh->pDCols[1], colIds, numOfColsIds) < 0) return -1;
if (tdMergeDataCols(pReadh->pDCols[0], pReadh->pDCols[1], pReadh->pDCols[1]->numOfRows, NULL) < 0) return -1;
if (tdMergeDataCols(pReadh->pDCols[0], pReadh->pDCols[1], pReadh->pDCols[1]->numOfRows, NULL, update != TD_ROW_PARTIAL_UPDATE) < 0) return -1;
}
ASSERT(pReadh->pDCols[0]->numOfRows == pBlock->numOfRows);
......@@ -657,4 +659,4 @@ static int tsdbLoadColData(SReadH *pReadh, SDFile *pDFile, SBlock *pBlock, SBloc
}
return 0;
}
\ No newline at end of file
}
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "tsdbRowMergeBuf.h"
#include "tdataformat.h"
// row1 has higher priority
SMemRow tsdbMergeTwoRows(SMergeBuf *pBuf, SMemRow row1, SMemRow row2, STSchema *pSchema1, STSchema *pSchema2) {
if(row2 == NULL) return row1;
if(row1 == NULL) return row2;
ASSERT(pSchema1->version == memRowVersion(row1));
ASSERT(pSchema2->version == memRowVersion(row2));
if(tsdbMergeBufMakeSureRoom(pBuf, pSchema1, pSchema2) < 0) {
return NULL;
}
return mergeTwoMemRows(*pBuf, row1, row2, pSchema1, pSchema2);
}
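// --- usage sketch (not part of the original diff) ---
// Assumes memRow and diskRow are SMemRow values carrying the same timestamp and
// pMemSchema/pDiskSchema are their matching schemas; since row1 has priority,
// columns present in memRow win over the ones in diskRow.
static SMemRow exampleMergeRows(SMergeBuf *pBuf, SMemRow memRow, SMemRow diskRow,
                                STSchema *pMemSchema, STSchema *pDiskSchema) {
  SMemRow merged = tsdbMergeTwoRows(pBuf, memRow, diskRow, pMemSchema, pDiskSchema);
  if (merged == NULL) {
    // the merge buffer could not be grown; treat as out-of-memory
  }
  return merged;
}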
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TD_TFUNCTIONAL_H
#define TD_TFUNCTIONAL_H
#ifdef __cplusplus
extern "C" {
#endif
#include "os.h"
//TODO: hard to use, trying to rewrite it using va_list
typedef void* (*GenericVaFunc)(void* args[]);
typedef int32_t (*I32VaFunc) (void* args[]);
typedef void (*VoidVaFunc) (void* args[]);
typedef struct GenericSavedFunc {
GenericVaFunc func;
void * args[];
} tGenericSavedFunc;
typedef struct I32SavedFunc {
I32VaFunc func;
void * args[];
} tI32SavedFunc;
typedef struct VoidSavedFunc {
VoidVaFunc func;
void * args[];
} tVoidSavedFunc;
tGenericSavedFunc* genericSavedFuncInit(GenericVaFunc func, int numOfArgs);
tI32SavedFunc* i32SavedFuncInit(I32VaFunc func, int numOfArgs);
tVoidSavedFunc* voidSavedFuncInit(VoidVaFunc func, int numOfArgs);
void* genericInvoke(tGenericSavedFunc* const pSavedFunc);
int32_t i32Invoke(tI32SavedFunc* const pSavedFunc);
void voidInvoke(tVoidSavedFunc* const pSavedFunc);
#ifdef __cplusplus
}
#endif
#endif
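// --- usage sketch (not part of the original header) ---
// The *SavedFuncInit helpers allocate room for numOfArgs pointers in the args[]
// flexible array; the caller fills the slots before invoking. newRow/oldRow are
// hypothetical placeholders, and free() comes from <stdlib.h>.
static void *pickNewerRow(void *args[]) { return (args[0] != NULL) ? args[0] : args[1]; }

static void exampleSavedFunc(void *newRow, void *oldRow) {
  tGenericSavedFunc *fn = genericSavedFuncInit(pickNewerRow, 2);
  if (fn == NULL) return;
  fn->args[0] = newRow;
  fn->args[1] = oldRow;
  void *picked = genericInvoke(fn);   // calls pickNewerRow(fn->args)
  (void)picked;
  free(fn);
}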
......@@ -23,6 +23,7 @@ extern "C" {
#include "os.h"
#include "taosdef.h"
#include "tarray.h"
#include "tfunctional.h"
#define MAX_SKIP_LIST_LEVEL 15
#define SKIP_LIST_RECORD_PERFORMANCE 0
......@@ -30,13 +31,17 @@ extern "C" {
// For key property setting
#define SL_ALLOW_DUP_KEY (uint8_t)0x0 // Allow duplicate key exists (for tag index usage)
#define SL_DISCARD_DUP_KEY (uint8_t)0x1 // Discard duplicate key (for data update=0 case)
#define SL_UPDATE_DUP_KEY (uint8_t)0x2 // Update duplicate key by remove/insert (for data update=1 case)
#define SL_UPDATE_DUP_KEY (uint8_t)0x2 // Update duplicate key by remove/insert (for data update!=0 case)
// For thread safety setting
#define SL_THREAD_SAFE (uint8_t)0x4
typedef char *SSkipListKey;
typedef char *(*__sl_key_fn_t)(const void *);
typedef void (*sl_patch_row_fn_t)(void * pDst, const void * pSrc);
typedef void* (*iter_next_fn_t)(void *iter);
typedef struct SSkipListNode {
uint8_t level;
void * pData;
......@@ -95,6 +100,12 @@ typedef struct tSkipListState {
uint64_t nTotalElapsedTimeForInsert;
} tSkipListState;
typedef enum {
SSkipListPutSuccess = 0,
SSkipListPutEarlyStop = 1,
SSkipListPutSkipOne = 2
} SSkipListPutStatus;
typedef struct SSkipList {
unsigned int seed;
__compar_fn_t comparFn;
......@@ -111,6 +122,7 @@ typedef struct SSkipList {
#if SKIP_LIST_RECORD_PERFORMANCE
tSkipListState state; // skiplist state
#endif
tGenericSavedFunc* insertHandleFn;
} SSkipList;
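// --- usage sketch (not part of the original header): wiring an insert handler ---
// In tSkipListPutImpl the handler is invoked with args[0] = incoming data and
// args[1] = the data already stored in the duplicate node (NULL for a fresh
// insert); a non-NULL return value becomes the node's pData. keepIncoming is a
// hypothetical handler name.
static void *keepIncoming(void *args[]) { return (args[0] != NULL) ? args[0] : args[1]; }

static void attachInsertHandler(SSkipList *pSkipList) {
  pSkipList->insertHandleFn = genericSavedFuncInit(keepIncoming, 2);  // freed in tSkipListDestroy
}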
typedef struct SSkipListIterator {
......@@ -118,7 +130,7 @@ typedef struct SSkipListIterator {
SSkipListNode *cur;
int32_t step; // the number of nodes that have been checked already
int32_t order; // order of the iterator
SSkipListNode *next; // next points to the true qualified node in skip list
SSkipListNode *next; // next points to the true qualified node in skiplist
} SSkipListIterator;
#define SL_IS_THREAD_SAFE(s) (((s)->flags) & SL_THREAD_SAFE)
......@@ -132,7 +144,7 @@ SSkipList *tSkipListCreate(uint8_t maxLevel, uint8_t keyType, uint16_t keyLen, _
__sl_key_fn_t fn);
void tSkipListDestroy(SSkipList *pSkipList);
SSkipListNode * tSkipListPut(SSkipList *pSkipList, void *pData);
void tSkipListPutBatch(SSkipList *pSkipList, void **ppData, int ndata);
void tSkipListPutBatchByIter(SSkipList *pSkipList, void *iter, iter_next_fn_t iterate);
SArray * tSkipListGet(SSkipList *pSkipList, SSkipListKey pKey);
void tSkipListPrint(SSkipList *pSkipList, int16_t nlevel);
SSkipListIterator *tSkipListCreateIter(SSkipList *pSkipList);
......
......@@ -480,6 +480,10 @@ int tsCompressTimestampImp(const char *const input, const int nelements, char *c
int64_t *istream = (int64_t *)input;
int64_t prev_value = istream[0];
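// reject timestamps with the sign bit set: as int64_t they are negative and the
// delta encoding below assumes non-negative values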
if(prev_value >= 0x8000000000000000) {
uWarn("compression timestamp is over signed long long range. ts = 0x%"PRIx64" \n", prev_value);
goto _exit_over;
}
int64_t prev_delta = -prev_value;
uint8_t flags = 0, flag1 = 0, flag2 = 0;
uint64_t dd1 = 0, dd2 = 0;
......
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "tfunctional.h"
#include "tarray.h"
tGenericSavedFunc* genericSavedFuncInit(GenericVaFunc func, int numOfArgs) {
tGenericSavedFunc* pSavedFunc = malloc(sizeof(tGenericSavedFunc) + numOfArgs * (sizeof(void*)));
pSavedFunc->func = func;
return pSavedFunc;
}
tI32SavedFunc* i32SavedFuncInit(I32VaFunc func, int numOfArgs) {
tI32SavedFunc* pSavedFunc = malloc(sizeof(tI32SavedFunc) + numOfArgs * sizeof(void *));
pSavedFunc->func = func;
return pSavedFunc;
}
tVoidSavedFunc* voidSavedFuncInit(VoidVaFunc func, int numOfArgs) {
tVoidSavedFunc* pSavedFunc = malloc(sizeof(tVoidSavedFunc) + numOfArgs * sizeof(void*));
pSavedFunc->func = func;
return pSavedFunc;
}
FORCE_INLINE void* genericInvoke(tGenericSavedFunc* const pSavedFunc) {
return pSavedFunc->func(pSavedFunc->args);
}
FORCE_INLINE int32_t i32Invoke(tI32SavedFunc* const pSavedFunc) {
return pSavedFunc->func(pSavedFunc->args);
}
FORCE_INLINE void voidInvoke(tVoidSavedFunc* const pSavedFunc) {
if(pSavedFunc) pSavedFunc->func(pSavedFunc->args);
}
......@@ -16,6 +16,7 @@
#include "tskiplist.h"
#include "os.h"
#include "tcompare.h"
#include "tdataformat.h"
#include "tulog.h"
#include "tutil.h"
......@@ -31,6 +32,7 @@ static SSkipListNode *tSkipListNewNode(uint8_t level);
static SSkipListNode *tSkipListPutImpl(SSkipList *pSkipList, void *pData, SSkipListNode **direction, bool isForward,
bool hasDup);
static FORCE_INLINE int tSkipListWLock(SSkipList *pSkipList);
static FORCE_INLINE int tSkipListRLock(SSkipList *pSkipList);
static FORCE_INLINE int tSkipListUnlock(SSkipList *pSkipList);
......@@ -80,6 +82,7 @@ SSkipList *tSkipListCreate(uint8_t maxLevel, uint8_t keyType, uint16_t keyLen, _
#if SKIP_LIST_RECORD_PERFORMANCE
pSkipList->state.nTotalMemSize += sizeof(SSkipList);
#endif
pSkipList->insertHandleFn = NULL;
return pSkipList;
}
......@@ -97,6 +100,8 @@ void tSkipListDestroy(SSkipList *pSkipList) {
tSkipListFreeNode(pTemp);
}
tfree(pSkipList->insertHandleFn);
tSkipListUnlock(pSkipList);
if (pSkipList->lock != NULL) {
pthread_rwlock_destroy(pSkipList->lock);
......@@ -124,8 +129,7 @@ SSkipListNode *tSkipListPut(SSkipList *pSkipList, void *pData) {
return pNode;
}
// Put a batch of data into the skiplist. The data yielded by the iterator must be in ascending key order
void tSkipListPutBatch(SSkipList *pSkipList, void **ppData, int ndata) {
void tSkipListPutBatchByIter(SSkipList *pSkipList, void *iter, iter_next_fn_t iterate) {
SSkipListNode *backward[MAX_SKIP_LIST_LEVEL] = {0};
SSkipListNode *forward[MAX_SKIP_LIST_LEVEL] = {0};
bool hasDup = false;
......@@ -135,17 +139,21 @@ void tSkipListPutBatch(SSkipList *pSkipList, void **ppData, int ndata) {
tSkipListWLock(pSkipList);
void* pData = iterate(iter);
if(pData == NULL) return;
// backward to put the first data
hasDup = tSkipListGetPosToPut(pSkipList, backward, ppData[0]);
tSkipListPutImpl(pSkipList, ppData[0], backward, false, hasDup);
hasDup = tSkipListGetPosToPut(pSkipList, backward, pData);
tSkipListPutImpl(pSkipList, pData, backward, false, hasDup);
for (int level = 0; level < pSkipList->maxLevel; level++) {
forward[level] = SL_NODE_GET_BACKWARD_POINTER(backward[level], level);
}
// forward to put the rest of data
for (int idata = 1; idata < ndata; idata++) {
pDataKey = pSkipList->keyFn(ppData[idata]);
while ((pData = iterate(iter)) != NULL) {
pDataKey = pSkipList->keyFn(pData);
hasDup = false;
// Compare max key
......@@ -186,9 +194,8 @@ void tSkipListPutBatch(SSkipList *pSkipList, void **ppData, int ndata) {
}
}
tSkipListPutImpl(pSkipList, ppData[idata], forward, true, hasDup);
tSkipListPutImpl(pSkipList, pData, forward, true, hasDup);
}
tSkipListUnlock(pSkipList);
}
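// --- usage sketch (not part of the original diff): array adapter ---
// The old tSkipListPutBatch(pSkipList, ppData, ndata) behaviour can be emulated
// by wrapping the array in a small cursor whose next-function matches
// iter_next_fn_t; the data must still be in ascending key order.
typedef struct { void **pp; int n; int i; } SArrayBatchIter;

static void *arrayBatchNext(void *iter) {
  SArrayBatchIter *ai = (SArrayBatchIter *)iter;
  return (ai->i < ai->n) ? ai->pp[ai->i++] : NULL;
}

static void putBatchFromArray(SSkipList *pSkipList, void **ppData, int ndata) {
  SArrayBatchIter it = {ppData, ndata, 0};
  tSkipListPutBatchByIter(pSkipList, &it, arrayBatchNext);
}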
......@@ -661,18 +668,40 @@ static SSkipListNode *tSkipListPutImpl(SSkipList *pSkipList, void *pData, SSkipL
uint8_t dupMode = SL_DUP_MODE(pSkipList);
SSkipListNode *pNode = NULL;
if (hasDup && (dupMode == SL_DISCARD_DUP_KEY || dupMode == SL_UPDATE_DUP_KEY)) {
if (hasDup && (dupMode != SL_ALLOW_DUP_KEY)) {
if (dupMode == SL_UPDATE_DUP_KEY) {
if (isForward) {
pNode = SL_NODE_GET_FORWARD_POINTER(direction[0], 0);
} else {
pNode = SL_NODE_GET_BACKWARD_POINTER(direction[0], 0);
}
atomic_store_ptr(&(pNode->pData), pData);
if (pSkipList->insertHandleFn) {
pSkipList->insertHandleFn->args[0] = pData;
pSkipList->insertHandleFn->args[1] = pNode->pData;
pData = genericInvoke(pSkipList->insertHandleFn);
}
if(pData) {
atomic_store_ptr(&(pNode->pData), pData);
}
} else {
// for compatibility, a duplicate key inserted when update=0 should also be counted as an affected row!
if(pSkipList->insertHandleFn) {
pSkipList->insertHandleFn->args[0] = NULL;
pSkipList->insertHandleFn->args[1] = NULL;
genericInvoke(pSkipList->insertHandleFn);
}
}
} else {
pNode = tSkipListNewNode(getSkipListRandLevel(pSkipList));
if (pNode != NULL) {
// insertHandleFn is assigned only for time-series data,
// in which case pData points to memory that will be freed later;
// for metadata, the memory allocation is not invoked.
if (pSkipList->insertHandleFn) {
pSkipList->insertHandleFn->args[0] = pData;
pSkipList->insertHandleFn->args[1] = NULL;
pData = genericInvoke(pSkipList->insertHandleFn);
}
pNode->pData = pData;
tSkipListDoInsert(pSkipList, direction, pNode, isForward);
......
def pre_test(){
sh '''
sudo rmtaos||echo 'no taosd installed'
'''
sh '''
cd ${WKC}
git reset --hard
git checkout $BRANCH_NAME
git pull
git submodule update
cd ${WK}
git reset --hard
git checkout $BRANCH_NAME
git pull
export TZ=Asia/Harbin
date
rm -rf ${WK}/debug
mkdir debug
cd debug
cmake .. > /dev/null
make > /dev/null
make install > /dev/null
pip3 install ${WKC}/src/connector/python
'''
return 1
}
def pre_test_p(){
sh '''
sudo rmtaos||echo 'no taosd installed'
'''
......@@ -39,7 +66,7 @@ pipeline {
stage('pytest') {
agent{label 'slad1'}
steps {
pre_test()
pre_test_p()
sh '''
cd ${WKC}/tests
find pytest -name '*'sql|xargs rm -rf
......@@ -48,7 +75,7 @@ pipeline {
}
}
stage('test_b1') {
agent{label 'master'}
agent{label 'slad2'}
steps {
pre_test()
......@@ -62,7 +89,7 @@ pipeline {
}
stage('test_crash_gen') {
agent{label "slad2"}
agent{label "slad3"}
steps {
pre_test()
sh '''
......@@ -92,7 +119,7 @@ pipeline {
}
sh'''
systemctl start taosd
nohup taosd >/dev/null &
sleep 10
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
......@@ -128,7 +155,7 @@ pipeline {
'''
}
sh '''
systemctl stop taosd
pkill -9 taosd || echo 1
cd ${WKC}/tests
./test-all.sh b2
date
......@@ -141,7 +168,7 @@ pipeline {
}
stage('test_valgrind') {
agent{label "slad3"}
agent{label "slad4"}
steps {
pre_test()
......
......@@ -33,7 +33,7 @@ namespace TDengineDriver
//sql parameters
private string dbName;
private string tbName;
private string precision;
private bool isInsertData;
private bool isQueryData;
......@@ -61,9 +61,9 @@ namespace TDengineDriver
tester.checkInsert();
tester.checkSelect();
tester.checkDropTable();
tester.dropDatabase();
tester.CloseConnection();
tester.cleanup();
}
......@@ -156,7 +156,9 @@ namespace TDengineDriver
Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "How many rows to insert, default is 100");
Console.WriteLine("{0:G}{1:G}", indent, "-c");
Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Configuration directory");
//
Console.WriteLine("{0:G}{1:G}", indent, "-ps");
Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Configurate db precision,default millisecond");
ExitProgram();
}
}
......@@ -168,9 +170,9 @@ namespace TDengineDriver
host = this.GetArgumentAsString(argv, "-h", "127.0.0.1");
user = this.GetArgumentAsString(argv, "-u", "root");
password = this.GetArgumentAsString(argv, "-p", "taosdata");
dbName = this.GetArgumentAsString(argv, "-db", "test");
dbName = this.GetArgumentAsString(argv, "-d", "test");
tbName = this.GetArgumentAsString(argv, "-s", "weather");
precision = this.GetArgumentAsString(argv, "-ps", "ms");
isInsertData = this.GetArgumentAsLong(argv, "-w", 0, 1, 1) != 0;
isQueryData = this.GetArgumentAsLong(argv, "-r", 0, 1, 1) != 0;
tableCount = this.GetArgumentAsLong(argv, "-n", 1, 10000, 10);
......@@ -183,6 +185,7 @@ namespace TDengineDriver
{
TDengine.Options((int)TDengineInitOption.TDDB_OPTION_CONFIGDIR, this.configDir);
TDengine.Options((int)TDengineInitOption.TDDB_OPTION_SHELL_ACTIVITY_TIMER, "60");
Console.WriteLine("init...");
TDengine.Init();
Console.WriteLine("get connection starting...");
}
......@@ -204,7 +207,7 @@ namespace TDengineDriver
public void createDatabase()
{
StringBuilder sql = new StringBuilder();
sql.Append("create database if not exists ").Append(this.dbName);
sql.Append("create database if not exists ").Append(this.dbName).Append(" precision '").Append(this.precision).Append("'");
execute(sql.ToString());
}
public void useDatabase()
......@@ -216,8 +219,8 @@ namespace TDengineDriver
public void checkSelect()
{
StringBuilder sql = new StringBuilder();
sql.Append("select * from test.weather");
execute(sql.ToString());
sql.Append("select * from ").Append(this.dbName).Append(".").Append(this.tbName);
ExecuteQuery(sql.ToString());
}
public void createTable()
{
......@@ -228,7 +231,7 @@ namespace TDengineDriver
public void checkInsert()
{
StringBuilder sql = new StringBuilder();
sql.Append("insert into test.weather (ts, temperature, humidity) values(now, 20.5, 34)");
sql.Append("insert into ").Append(this.dbName).Append(".").Append(this.tbName).Append("(ts, temperature, humidity) values(now, 20.5, 34)");
execute(sql.ToString());
}
public void checkDropTable()
......@@ -237,6 +240,12 @@ namespace TDengineDriver
sql.Append("drop table if exists ").Append(this.dbName).Append(".").Append(this.tbName).Append("");
execute(sql.ToString());
}
public void dropDatabase()
{
StringBuilder sql = new StringBuilder();
sql.Append("drop database if exists ").Append(this.dbName);
execute(sql.ToString());
}
public void execute(string sql)
{
DateTime dt1 = DateTime.Now;
......@@ -266,6 +275,7 @@ namespace TDengineDriver
DateTime dt1 = DateTime.Now;
long queryRows = 0;
IntPtr res = TDengine.Query(conn, sql);
getPrecision(res);
if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0))
{
Console.Write(sql.ToString() + " failure, ");
......@@ -379,8 +389,31 @@ namespace TDengineDriver
static void ExitProgram()
{
TDengine.Cleanup();
System.Environment.Exit(0);
}
public void cleanup()
{
Console.WriteLine("clean up...");
System.Environment.Exit(0);
}
// method to get db precision
public void getPrecision(IntPtr res)
{
int psc=TDengine.ResultPrecision(res);
switch(psc)
{
case 0:
Console.WriteLine("db:[{0:G}]'s precision is {1:G}",this.dbName,"millisecond");
break;
case 1:
Console.WriteLine("db:[{0:G}]'s precision is {1:G}",this.dbName,"microsecond");
break;
case 2:
Console.WriteLine("db:[{0:G}]'s precision is {1:G}",this.dbName,"nanosecond");
break;
}
}
}
}
......@@ -19,136 +19,153 @@ using System.Runtime.InteropServices;
namespace TDengineDriver
{
enum TDengineDataType {
TSDB_DATA_TYPE_NULL = 0, // 1 bytes
TSDB_DATA_TYPE_BOOL = 1, // 1 bytes
TSDB_DATA_TYPE_TINYINT = 2, // 1 bytes
TSDB_DATA_TYPE_SMALLINT = 3, // 2 bytes
TSDB_DATA_TYPE_INT = 4, // 4 bytes
TSDB_DATA_TYPE_BIGINT = 5, // 8 bytes
TSDB_DATA_TYPE_FLOAT = 6, // 4 bytes
TSDB_DATA_TYPE_DOUBLE = 7, // 8 bytes
TSDB_DATA_TYPE_BINARY = 8, // string
TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes
TSDB_DATA_TYPE_NCHAR = 10 // unicode string
}
enum TDengineInitOption
{
TSDB_OPTION_LOCALE = 0,
TSDB_OPTION_CHARSET = 1,
TSDB_OPTION_TIMEZONE = 2,
TDDB_OPTION_CONFIGDIR = 3,
TDDB_OPTION_SHELL_ACTIVITY_TIMER = 4
}
class TDengineMeta
{
public string name;
public short size;
public byte type;
public string TypeName()
enum TDengineDataType
{
switch ((TDengineDataType)type)
{
case TDengineDataType.TSDB_DATA_TYPE_BOOL:
return "BOOLEAN";
case TDengineDataType.TSDB_DATA_TYPE_TINYINT:
return "BYTE";
case TDengineDataType.TSDB_DATA_TYPE_SMALLINT:
return "SHORT";
case TDengineDataType.TSDB_DATA_TYPE_INT:
return "INT";
case TDengineDataType.TSDB_DATA_TYPE_BIGINT:
return "LONG";
case TDengineDataType.TSDB_DATA_TYPE_FLOAT:
return "FLOAT";
case TDengineDataType.TSDB_DATA_TYPE_DOUBLE:
return "DOUBLE";
case TDengineDataType.TSDB_DATA_TYPE_BINARY:
return "STRING";
case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP:
return "TIMESTAMP";
case TDengineDataType.TSDB_DATA_TYPE_NCHAR:
return "NCHAR";
default:
return "undefine";
}
TSDB_DATA_TYPE_NULL = 0, // 1 bytes
TSDB_DATA_TYPE_BOOL = 1, // 1 bytes
TSDB_DATA_TYPE_TINYINT = 2, // 1 bytes
TSDB_DATA_TYPE_SMALLINT = 3, // 2 bytes
TSDB_DATA_TYPE_INT = 4, // 4 bytes
TSDB_DATA_TYPE_BIGINT = 5, // 8 bytes
TSDB_DATA_TYPE_FLOAT = 6, // 4 bytes
TSDB_DATA_TYPE_DOUBLE = 7, // 8 bytes
TSDB_DATA_TYPE_BINARY = 8, // string
TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes
TSDB_DATA_TYPE_NCHAR = 10, // unicode string
TSDB_DATA_TYPE_UTINYINT = 11,// 1 byte
TSDB_DATA_TYPE_USMALLINT= 12,// 2 bytes
TSDB_DATA_TYPE_UINT = 13, // 4 bytes
TSDB_DATA_TYPE_UBIGINT= 14 // 8 bytes
}
}
class TDengine
{
public const int TSDB_CODE_SUCCESS = 0;
enum TDengineInitOption
{
TSDB_OPTION_LOCALE = 0,
TSDB_OPTION_CHARSET = 1,
TSDB_OPTION_TIMEZONE = 2,
TDDB_OPTION_CONFIGDIR = 3,
TDDB_OPTION_SHELL_ACTIVITY_TIMER = 4
}
[DllImport("taos.dll", EntryPoint = "taos_init", CallingConvention = CallingConvention.Cdecl)]
static extern public void Init();
class TDengineMeta
{
public string name;
public short size;
public byte type;
public string TypeName()
{
switch ((TDengineDataType)type)
{
case TDengineDataType.TSDB_DATA_TYPE_BOOL:
return "BOOL";
case TDengineDataType.TSDB_DATA_TYPE_TINYINT:
return "TINYINT";
case TDengineDataType.TSDB_DATA_TYPE_SMALLINT:
return "SMALLINT";
case TDengineDataType.TSDB_DATA_TYPE_INT:
return "INT";
case TDengineDataType.TSDB_DATA_TYPE_BIGINT:
return "BIGINT";
case TDengineDataType.TSDB_DATA_TYPE_UTINYINT:
return "TINYINT UNSIGNED";
case TDengineDataType.TSDB_DATA_TYPE_USMALLINT:
return "SMALLINT UNSIGNED";
case TDengineDataType.TSDB_DATA_TYPE_UINT:
return "INT UNSIGNED";
case TDengineDataType.TSDB_DATA_TYPE_UBIGINT:
return "BIGINT UNSIGNED";
case TDengineDataType.TSDB_DATA_TYPE_FLOAT:
return "FLOAT";
case TDengineDataType.TSDB_DATA_TYPE_DOUBLE:
return "DOUBLE";
case TDengineDataType.TSDB_DATA_TYPE_BINARY:
return "STRING";
case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP:
return "TIMESTAMP";
case TDengineDataType.TSDB_DATA_TYPE_NCHAR:
return "NCHAR";
default:
return "undefine";
}
}
}
[DllImport("taos.dll", EntryPoint = "taos_cleanup", CallingConvention = CallingConvention.Cdecl)]
static extern public void Cleanup();
class TDengine
{
public const int TSDB_CODE_SUCCESS = 0;
[DllImport("taos.dll", EntryPoint = "taos_options", CallingConvention = CallingConvention.Cdecl)]
static extern public void Options(int option, string value);
[DllImport("taos", EntryPoint = "taos_init", CallingConvention = CallingConvention.Cdecl)]
static extern public void Init();
[DllImport("taos.dll", EntryPoint = "taos_connect", CallingConvention = CallingConvention.Cdecl)]
static extern public IntPtr Connect(string ip, string user, string password, string db, short port);
[DllImport("taos", EntryPoint = "taos_cleanup", CallingConvention = CallingConvention.Cdecl)]
static extern public void Cleanup();
[DllImport("taos.dll", EntryPoint = "taos_errstr", CallingConvention = CallingConvention.Cdecl)]
static extern private IntPtr taos_errstr(IntPtr res);
static public string Error(IntPtr res)
{
IntPtr errPtr = taos_errstr(res);
return Marshal.PtrToStringAnsi(errPtr);
}
[DllImport("taos", EntryPoint = "taos_options", CallingConvention = CallingConvention.Cdecl)]
static extern public void Options(int option, string value);
[DllImport("taos.dll", EntryPoint = "taos_errno", CallingConvention = CallingConvention.Cdecl)]
static extern public int ErrorNo(IntPtr res);
[DllImport("taos", EntryPoint = "taos_connect", CallingConvention = CallingConvention.Cdecl)]
static extern public IntPtr Connect(string ip, string user, string password, string db, short port);
[DllImport("taos.dll", EntryPoint = "taos_query", CallingConvention = CallingConvention.Cdecl)]
static extern public IntPtr Query(IntPtr conn, string sqlstr);
[DllImport("taos", EntryPoint = "taos_errstr", CallingConvention = CallingConvention.Cdecl)]
static extern private IntPtr taos_errstr(IntPtr res);
static public string Error(IntPtr res)
{
IntPtr errPtr = taos_errstr(res);
return Marshal.PtrToStringAnsi(errPtr);
}
[DllImport("taos.dll", EntryPoint = "taos_affected_rows", CallingConvention = CallingConvention.Cdecl)]
static extern public int AffectRows(IntPtr res);
[DllImport("taos", EntryPoint = "taos_errno", CallingConvention = CallingConvention.Cdecl)]
static extern public int ErrorNo(IntPtr res);
[DllImport("taos.dll", EntryPoint = "taos_field_count", CallingConvention = CallingConvention.Cdecl)]
static extern public int FieldCount(IntPtr res);
[DllImport("taos", EntryPoint = "taos_query", CallingConvention = CallingConvention.Cdecl)]
static extern public IntPtr Query(IntPtr conn, string sqlstr);
[DllImport("taos.dll", EntryPoint = "taos_fetch_fields", CallingConvention = CallingConvention.Cdecl)]
static extern private IntPtr taos_fetch_fields(IntPtr res);
static public List<TDengineMeta> FetchFields(IntPtr res)
{
const int fieldSize = 68;
List<TDengineMeta> metas = new List<TDengineMeta>();
if (res == IntPtr.Zero)
{
return metas;
}
int fieldCount = FieldCount(res);
IntPtr fieldsPtr = taos_fetch_fields(res);
for (int i = 0; i < fieldCount; ++i)
{
int offset = i * fieldSize;
TDengineMeta meta = new TDengineMeta();
meta.name = Marshal.PtrToStringAnsi(fieldsPtr + offset);
meta.type = Marshal.ReadByte(fieldsPtr + offset + 65);
meta.size = Marshal.ReadInt16(fieldsPtr + offset + 66);
metas.Add(meta);
}
return metas;
}
[DllImport("taos", EntryPoint = "taos_affected_rows", CallingConvention = CallingConvention.Cdecl)]
static extern public int AffectRows(IntPtr res);
[DllImport("taos.dll", EntryPoint = "taos_fetch_row", CallingConvention = CallingConvention.Cdecl)]
static extern public IntPtr FetchRows(IntPtr res);
[DllImport("taos", EntryPoint = "taos_field_count", CallingConvention = CallingConvention.Cdecl)]
static extern public int FieldCount(IntPtr res);
[DllImport("taos.dll", EntryPoint = "taos_free_result", CallingConvention = CallingConvention.Cdecl)]
static extern public IntPtr FreeResult(IntPtr res);
[DllImport("taos", EntryPoint = "taos_fetch_fields", CallingConvention = CallingConvention.Cdecl)]
static extern private IntPtr taos_fetch_fields(IntPtr res);
static public List<TDengineMeta> FetchFields(IntPtr res)
{
const int fieldSize = 68;
[DllImport("taos.dll", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)]
static extern public int Close(IntPtr taos);
}
}
\ No newline at end of file
List<TDengineMeta> metas = new List<TDengineMeta>();
if (res == IntPtr.Zero)
{
return metas;
}
int fieldCount = FieldCount(res);
IntPtr fieldsPtr = taos_fetch_fields(res);
for (int i = 0; i < fieldCount; ++i)
{
int offset = i * fieldSize;
TDengineMeta meta = new TDengineMeta();
meta.name = Marshal.PtrToStringAnsi(fieldsPtr + offset);
meta.type = Marshal.ReadByte(fieldsPtr + offset + 65);
meta.size = Marshal.ReadInt16(fieldsPtr + offset + 66);
metas.Add(meta);
}
return metas;
}
[DllImport("taos", EntryPoint = "taos_fetch_row", CallingConvention = CallingConvention.Cdecl)]
static extern public IntPtr FetchRows(IntPtr res);
[DllImport("taos", EntryPoint = "taos_free_result", CallingConvention = CallingConvention.Cdecl)]
static extern public IntPtr FreeResult(IntPtr res);
[DllImport("taos", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)]
static extern public int Close(IntPtr taos);
// get precision of the result set
[DllImport("taos", EntryPoint = "taos_result_precision", CallingConvention = CallingConvention.Cdecl)]
static extern public int ResultPrecision(IntPtr taos);
}
}
......@@ -163,5 +163,9 @@ namespace TDengineDriver
[DllImport("taos", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)]
static extern public int Close(IntPtr taos);
// get precision of the result set
[DllImport("taos", EntryPoint = "taos_result_precision", CallingConvention = CallingConvention.Cdecl)]
static extern public int ResultPrecision(IntPtr taos);
}
}
......@@ -163,5 +163,8 @@ namespace TDengineDriver
[DllImport("taos", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)]
static extern public int Close(IntPtr taos);
// get precision of the result set
[DllImport("taos", EntryPoint = "taos_result_precision", CallingConvention = CallingConvention.Cdecl)]
static extern public int ResultPrecision(IntPtr taos);
}
}
def pre_test(){
sh '''
sudo rmtaos||echo 'no taosd installed'
'''
sh '''
cd ${WKC}
git reset --hard
git checkout $BRANCH_NAME
git pull
git submodule update
cd ${WK}
git reset --hard
git checkout $BRANCH_NAME
git pull
export TZ=Asia/Harbin
date
rm -rf ${WK}/debug
mkdir debug
cd debug
cmake .. > /dev/null
make > /dev/null
make install > /dev/null
pip3 install ${WKC}/src/connector/python/ || echo 0
'''
return 1
}
def pre_test_p(){
sh '''
sudo rmtaos||echo 'no taosd installed'
'''
......@@ -39,7 +66,7 @@ pipeline {
stage('pytest') {
agent{label 'slam1'}
steps {
pre_test()
pre_test_p()
sh '''
cd ${WKC}/tests
find pytest -name '*'sql|xargs rm -rf
......@@ -92,7 +119,7 @@ pipeline {
}
sh'''
systemctl start taosd
nohup taosd >/dev/null &
sleep 10
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
......@@ -136,7 +163,7 @@ pipeline {
'''
}
sh '''
systemctl stop taosd
pkill -9 taosd || echo 1
cd ${WKC}/tests
./test-all.sh b2
date
......
......@@ -163,6 +163,11 @@ python3 test.py -f tools/taosdemoTestSampleData.py
python3 test.py -f tools/taosdemoTestInterlace.py
python3 test.py -f tools/taosdemoTestQuery.py
# nano support
python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py
python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py
python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py
python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertTime_step.py
python3 test.py -f tools/taosdumpTestNanoSupport.py
# update
......@@ -247,6 +252,7 @@ python3 ./test.py -f query/queryStateWindow.py
python3 ./test.py -f query/nestedQuery/queryWithOrderLimit.py
python3 ./test.py -f query/nestquery_last_row.py
python3 ./test.py -f query/queryCnameDisplay.py
python3 ./test.py -f query/operator_cost.py
python3 test.py -f query/nestedQuery/queryWithSpread.py
#stream
......@@ -368,6 +374,15 @@ python3 test.py -f alter/alter_keep.py
python3 test.py -f alter/alter_cacheLastRow.py
python3 ./test.py -f query/querySession.py
python3 test.py -f alter/alter_create_exception.py
python3 ./test.py -f insert/flushwhiledrop.py
#======================p4-end===============
python3 test.py -f tools/taosdemoAllTest/pytest.py
(This diff has been collapsed.)
8.855,"binary_str0" ,1626870128248246976
8.75,"binary_str1" ,1626870128249060032
5.44,"binary_str2" ,1626870128249067968
8.45,"binary_str3" ,1626870128249072064
4.07,"binary_str4" ,1626870128249075904
6.97,"binary_str5" ,1626870128249078976
6.86,"binary_str6" ,1626870128249082048
1.585,"binary_str7" ,1626870128249085120
1.4,"binary_str8" ,1626870128249087936
5.135,"binary_str9" ,1626870128249092032
3.15,"binary_str10" ,1626870128249095104
1.765,"binary_str11" ,1626870128249097920
7.71,"binary_str12" ,1626870128249100992
3.91,"binary_str13" ,1626870128249104064
5.615,"binary_str14" ,1626870128249106880
9.495,"binary_str15" ,1626870128249109952
3.825,"binary_str16" ,1626870128249113024
1.94,"binary_str17" ,1626870128249117120
5.385,"binary_str18" ,1626870128249119936
7.075,"binary_str19" ,1626870128249123008
5.715,"binary_str20" ,1626870128249126080
1.83,"binary_str21" ,1626870128249128896
6.365,"binary_str22" ,1626870128249131968
6.55,"binary_str23" ,1626870128249135040
6.315,"binary_str24" ,1626870128249138112
3.82,"binary_str25" ,1626870128249140928
2.455,"binary_str26" ,1626870128249145024
7.795,"binary_str27" ,1626870128249148096
2.47,"binary_str28" ,1626870128249150912
1.37,"binary_str29" ,1626870128249155008
5.39,"binary_str30" ,1626870128249158080
5.13,"binary_str31" ,1626870128249160896
4.09,"binary_str32" ,1626870128249163968
5.855,"binary_str33" ,1626870128249167040
0.17,"binary_str34" ,1626870128249170112
1.955,"binary_str35" ,1626870128249173952
0.585,"binary_str36" ,1626870128249178048
0.33,"binary_str37" ,1626870128249181120
7.925,"binary_str38" ,1626870128249183936
9.685,"binary_str39" ,1626870128249187008
2.6,"binary_str40" ,1626870128249191104
5.705,"binary_str41" ,1626870128249193920
3.965,"binary_str42" ,1626870128249196992
4.43,"binary_str43" ,1626870128249200064
8.73,"binary_str44" ,1626870128249202880
3.105,"binary_str45" ,1626870128249205952
9.39,"binary_str46" ,1626870128249209024
2.825,"binary_str47" ,1626870128249212096
9.675,"binary_str48" ,1626870128249214912
9.99,"binary_str49" ,1626870128249217984
4.51,"binary_str50" ,1626870128249221056
4.94,"binary_str51" ,1626870128249223872
7.72,"binary_str52" ,1626870128249226944
4.135,"binary_str53" ,1626870128249231040
2.325,"binary_str54" ,1626870128249234112
4.585,"binary_str55" ,1626870128249236928
8.76,"binary_str56" ,1626870128249240000
4.715,"binary_str57" ,1626870128249243072
0.56,"binary_str58" ,1626870128249245888
5.35,"binary_str59" ,1626870128249249984
5.075,"binary_str60" ,1626870128249253056
6.665,"binary_str61" ,1626870128249256128
7.13,"binary_str62" ,1626870128249258944
2.775,"binary_str63" ,1626870128249262016
5.775,"binary_str64" ,1626870128249265088
1.62,"binary_str65" ,1626870128249267904
1.625,"binary_str66" ,1626870128249270976
8.15,"binary_str67" ,1626870128249274048
0.75,"binary_str68" ,1626870128249277120
3.265,"binary_str69" ,1626870128249280960
8.585,"binary_str70" ,1626870128249284032
1.88,"binary_str71" ,1626870128249287104
8.44,"binary_str72" ,1626870128249289920
5.12,"binary_str73" ,1626870128249295040
2.58,"binary_str74" ,1626870128249298112
9.42,"binary_str75" ,1626870128249300928
1.765,"binary_str76" ,1626870128249304000
2.66,"binary_str77" ,1626870128249308096
1.405,"binary_str78" ,1626870128249310912
5.595,"binary_str79" ,1626870128249315008
2.28,"binary_str80" ,1626870128249318080
9.24,"binary_str81" ,1626870128249320896
9.03,"binary_str82" ,1626870128249323968
6.055,"binary_str83" ,1626870128249327040
1.74,"binary_str84" ,1626870128249330112
5.77,"binary_str85" ,1626870128249332928
1.97,"binary_str86" ,1626870128249336000
0.3,"binary_str87" ,1626870128249339072
7.145,"binary_str88" ,1626870128249342912
0.88,"binary_str89" ,1626870128249345984
8.025,"binary_str90" ,1626870128249349056
4.81,"binary_str91" ,1626870128249351872
0.725,"binary_str92" ,1626870128249355968
3.85,"binary_str93" ,1626870128249359040
9.455,"binary_str94" ,1626870128249362112
2.265,"binary_str95" ,1626870128249364928
3.985,"binary_str96" ,1626870128249368000
9.375,"binary_str97" ,1626870128249371072
0.2,"binary_str98" ,1626870128249373888
6.95,"binary_str99" ,1626870128249377984
"string0",7,8.615
"string1",4,9.895
"string2",3,2.92
"string3",3,5.62
"string4",7,1.615
"string5",6,1.45
"string6",5,7.48
"string7",7,3.01
"string8",5,4.76
"string9",10,7.09
"string10",2,8.38
"string11",7,8.65
"string12",5,5.025
"string13",10,5.765
"string14",2,4.57
"string15",2,1.03
"string16",7,6.98
"string17",10,0.23
"string18",7,5.815
"string19",1,2.37
"string20",10,8.865
"string21",3,1.235
"string22",2,8.62
"string23",9,1.045
"string24",8,4.34
"string25",1,5.455
"string26",2,4.475
"string27",1,6.95
"string28",2,3.39
"string29",3,6.79
"string30",7,9.735
"string31",1,9.79
"string32",10,9.955
"string33",1,5.095
"string34",3,3.86
"string35",9,5.105
"string36",10,4.22
"string37",1,2.78
"string38",9,6.345
"string39",1,0.975
"string40",5,6.16
"string41",4,7.735
"string42",5,6.6
"string43",8,2.845
"string44",1,0.655
"string45",3,2.995
"string46",9,3.6
"string47",8,3.47
"string48",3,7.98
"string49",6,2.225
"string50",9,5.44
"string51",4,6.335
"string52",3,2.955
"string53",1,0.565
"string54",6,5.575
"string55",6,9.905
"string56",9,6.025
"string57",8,0.94
"string58",10,0.15
"string59",8,1.555
"string60",4,2.28
"string61",2,8.29
"string62",9,6.22
"string63",6,3.35
"string64",10,6.7
"string65",3,9.345
"string66",7,9.815
"string67",1,5.365
"string68",10,3.81
"string69",1,6.405
"string70",8,2.715
"string71",3,8.58
"string72",8,6.34
"string73",2,7.49
"string74",4,8.64
"string75",3,8.995
"string76",7,3.465
"string77",1,7.64
"string78",6,3.65
"string79",6,1.4
"string80",6,5.875
"string81",2,1.22
"string82",5,7.87
"string83",9,8.41
"string84",9,8.9
"string85",9,3.89
"string86",2,5.0
"string87",2,4.495
"string88",4,2.835
"string89",3,5.895
"string90",7,8.41
"string91",5,5.125
"string92",7,9.165
"string93",5,8.315
"string94",10,7.485
"string95",7,4.635
"string96",2,6.015
"string97",8,0.595
"string98",3,8.79
"string99",4,1.72
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 10,
"thread_count_create_tbl": 10,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 100,
"num_of_records_per_req": 1000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "testdb3",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 50,
"blocks": 8,
"precision": "ms",
"keep": 36,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 100,
"childtable_prefix": "tb0_",
"auto_create_table": "no",
"batch_create_tbl_num": 20,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 100,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1000,
"start_timestamp": "2021-07-01 00:00:00.000",
"sample_format": "",
"sample_file": "",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2},
{"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1},
{"type": "BOOL"},{"type": "NCHAR","len":16}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}]
}]
}]
}
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 10,
"thread_count_create_tbl": 10,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 100,
"num_of_records_per_req": 1000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "testdb1",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 50,
"blocks": 8,
"precision": "ns",
"keep": 36,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 100,
"childtable_prefix": "tb0_",
"auto_create_table": "no",
"batch_create_tbl_num": 20,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 100,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1000,
"start_timestamp": "2021-07-01 00:00:00.000",
"sample_format": "",
"sample_file": "",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2},
{"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1},
{"type": "BOOL"},{"type": "NCHAR","len":16}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}]
}]
}]
}
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 10,
"thread_count_create_tbl": 10,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 100,
"num_of_records_per_req": 1000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "testdb2",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 50,
"blocks": 8,
"precision": "us",
"keep": 36,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 100,
"childtable_prefix": "tb0_",
"auto_create_table": "no",
"batch_create_tbl_num": 20,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 100,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1000,
"start_timestamp": "2021-07-01 00:00:00.000",
"sample_format": "",
"sample_file": "",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2},
{"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1},
{"type": "BOOL"},{"type": "NCHAR","len":16}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}]
}]
}]
}
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
buildPath = ""
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root)-len("/build/bin")]
break
return buildPath
def run(self):
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosd not found!")
else:
tdLog.info("taosd found in %s" % buildPath)
binPath = buildPath+ "/build/bin/"
# insert: create one or multiple tables per sql and insert multiple rows per sql
# check the taosdemo time_step parameter when the database precision is ns
os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoInsertNanoDB.json -y " % binPath)
tdSql.execute("use testdb1")
tdSql.query("show stables")
tdSql.checkData(0, 4, 100)
tdSql.query("select count (tbname) from stb0")
tdSql.checkData(0, 0, 100)
tdSql.query("select count(*) from tb0_0")
tdSql.checkData(0, 0, 100)
tdSql.query("select count(*) from stb0")
tdSql.checkData(0, 0, 10000)
tdSql.query("describe stb0")
tdSql.getData(9, 1)
tdSql.checkDataType(9, 1,"TIMESTAMP")
tdSql.query("select last(ts) from stb0")
tdSql.checkData(0, 0,"2021-07-01 00:00:00.000099000")
# check the taosdemo time_step parameter when the database precision is us
os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoInsertUSDB.json -y " % binPath)
tdSql.execute("use testdb2")
tdSql.query("show stables")
tdSql.checkData(0, 4, 100)
tdSql.query("select count (tbname) from stb0")
tdSql.checkData(0, 0, 100)
tdSql.query("select count(*) from tb0_0")
tdSql.checkData(0, 0, 100)
tdSql.query("select count(*) from stb0")
tdSql.checkData(0, 0, 10000)
tdSql.query("describe stb0")
tdSql.getData(9, 1)
tdSql.checkDataType(9, 1,"TIMESTAMP")
tdSql.query("select last(ts) from stb0")
tdSql.checkData(0, 0,"2021-07-01 00:00:00.099000")
# check the taosdemo time_step parameter when the database precision is ms
os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoInsertMSDB.json -y " % binPath)
tdSql.execute("use testdb3")
tdSql.query("show stables")
tdSql.checkData(0, 4, 100)
tdSql.query("select count (tbname) from stb0")
tdSql.checkData(0, 0, 100)
tdSql.query("select count(*) from tb0_0")
tdSql.checkData(0, 0, 100)
tdSql.query("select count(*) from stb0")
tdSql.checkData(0, 0, 10000)
tdSql.query("describe stb0")
tdSql.checkDataType(9, 1,"TIMESTAMP")
tdSql.query("select last(ts) from stb0")
tdSql.checkData(0, 0,"2021-07-01 00:01:39.000")
os.system("rm -rf ./res.txt")
os.system("rm -rf ./*.py.sql")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
......@@ -244,17 +244,17 @@ class TDTestCase:
tdSql.query("select count(*) from stb1")
tdSql.checkData(0, 0, 10)
# # insert: sample json
# os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-sample-stmt.json -y " % binPath)
# tdSql.execute("use dbtest123")
# tdSql.query("select c2 from stb0")
# tdSql.checkData(0, 0, 2147483647)
# tdSql.query("select * from stb1 where t1=-127")
# tdSql.checkRows(20)
# tdSql.query("select * from stb1 where t2=127")
# tdSql.checkRows(10)
# tdSql.query("select * from stb1 where t2=126")
# tdSql.checkRows(10)
# insert: sample json
os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-sample-stmt.json -y " % binPath)
tdSql.execute("use dbtest123")
tdSql.query("select c2 from stb0")
tdSql.checkData(0, 0, 2147483647)
tdSql.query("select * from stb1 where t1=-127")
tdSql.checkRows(20)
tdSql.query("select * from stb1 where t2=127")
tdSql.checkRows(10)
tdSql.query("select * from stb1 where t2=126")
tdSql.checkRows(10)
# insert: test interlace parament
os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-interlace-row-stmt.json -y " % binPath)
......
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 10,
"thread_count_create_tbl": 10,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 100,
"num_of_records_per_req": 1000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "nsdb",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 50,
"blocks": 8,
"precision": "ns",
"keep": 36,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 100,
"childtable_prefix": "tb0_",
"auto_create_table": "no",
"batch_create_tbl_num": 20,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 100,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 10000000,
"start_timestamp": "2021-07-01 00:00:00.000",
"sample_format": "",
"sample_file": "",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2},
{"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1},
{"type": "BOOL"},{"type": "NCHAR","len":16}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}]
},
{
"name": "stb1",
"child_table_exists":"no",
"childtable_count": 100,
"childtable_prefix": "tb1_",
"auto_create_table": "no",
"batch_create_tbl_num": 20,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 100,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 10,
"disorder_range": 1000,
"timestamp_step": 10000000,
"start_timestamp": "2021-07-01 00:00:00.000",
"sample_format": "",
"sample_file": "",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2},
{"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1},
{"type": "BOOL"},{"type": "NCHAR","len":16}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}]
}]
}]
}
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 10,
"thread_count_create_tbl": 10,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 100,
"num_of_records_per_req": 1000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "subnsdb",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 50,
"blocks": 8,
"precision": "ns",
"keep": 36,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 10,
"childtable_prefix": "tb0_",
"auto_create_table": "no",
"batch_create_tbl_num": 20,
"data_source": "samples",
"insert_mode": "taosc",
"insert_rows": 10,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 10000000,
"start_timestamp": "2021-07-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./tools/taosdemoAllTest/nano_samples.csv",
"tags_file": "./tools/taosdemoAllTest/nano_sampletags.csv",
"columns": [{"type": "DOUBLE"}, {"type": "BINARY", "len": 64, "count":1}, {"type": "TIMESTAMP", "count":1}],
"tags": [{"type": "BINARY", "len": 16, "count":1},{"type": "INT"},{"type": "DOUBLE"}]
},
{
"name": "stb1",
"child_table_exists":"no",
"childtable_count": 10,
"childtable_prefix": "tb1_",
"auto_create_table": "no",
"batch_create_tbl_num": 20,
"data_source": "samples",
"insert_mode": "taosc",
"insert_rows": 10,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 10,
"disorder_range": 1000,
"timestamp_step": 10000000,
"start_timestamp": "2021-07-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./tools/taosdemoAllTest/nano_samples.csv",
"tags_file": "./tools/taosdemoAllTest/nano_sampletags.csv",
"columns": [{"type": "DOUBLE"}, {"type": "BINARY", "len": 64, "count":1}, {"type": "TIMESTAMP", "count":1}],
"tags": [{"type": "BINARY", "len": 16, "count":1},{"type": "INT"},{"type": "DOUBLE"}]
}]
}]
}
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 10,
"thread_count_create_tbl": 10,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 100,
"num_of_records_per_req": 1000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "nsdb2",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 50,
"blocks": 8,
"precision": "ns",
"keep": 36,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 100,
"childtable_prefix": "tb0_",
"auto_create_table": "no",
"batch_create_tbl_num": 20,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 100,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 10,
"start_timestamp": "now",
"sample_format": "",
"sample_file": "",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2},
{"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1},
{"type": "BOOL"},{"type": "NCHAR","len":16}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}]
}]
}]
}
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 10,
"thread_count_create_tbl": 10,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 100,
"num_of_records_per_req": 1000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "nsdbcsv",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 50,
"blocks": 8,
"precision": "ns",
"keep": 36,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 100,
"childtable_prefix": "tb0_",
"auto_create_table": "no",
"batch_create_tbl_num": 20,
"data_source": "samples",
"insert_mode": "taosc",
"insert_rows": 100,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 10000000,
"start_timestamp": "2021-07-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./tools/taosdemoAllTest/nano_samples.csv",
"tags_file": "./tools/taosdemoAllTest/nano_sampletags.csv",
"columns": [{"type": "DOUBLE"}, {"type": "BINARY", "len": 64, "count":1}, {"type": "TIMESTAMP", "count":1}],
"tags": [{"type": "BINARY", "len": 16, "count":1},{"type": "INT"},{"type": "DOUBLE"}]
},
{
"name": "stb1",
"child_table_exists":"no",
"childtable_count": 100,
"childtable_prefix": "tb1_",
"auto_create_table": "no",
"batch_create_tbl_num": 20,
"data_source": "samples",
"insert_mode": "taosc",
"insert_rows": 100,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 10,
"disorder_range": 1000,
"timestamp_step": 10000000,
"start_timestamp": "2021-07-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./tools/taosdemoAllTest/nano_samples.csv",
"tags_file": "./tools/taosdemoAllTest/nano_sampletags.csv",
"columns": [{"type": "DOUBLE"}, {"type": "BINARY", "len": 64, "count":1}, {"type": "TIMESTAMP", "count":1}],
"tags": [{"type": "BINARY", "len": 16, "count":1},{"type": "INT"},{"type": "DOUBLE"}]
}]
}]
}
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
buildPath = ""
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root)-len("/build/bin")]
break
return buildPath
def run(self):
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosd not found!")
else:
tdLog.info("taosd found in %s" % buildPath)
binPath = buildPath+ "/build/bin/"
# insert: create one or multiple tables per sql and insert multiple rows per sql
# insert data starting from a specified timestamp
# check stable stb0
os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabase.json -y " % binPath)
tdSql.execute("use nsdb")
tdSql.query("show stables")
tdSql.checkData(0, 4, 100)
tdSql.query("select count (tbname) from stb0")
tdSql.checkData(0, 0, 100)
tdSql.query("select count(*) from tb0_0")
tdSql.checkData(0, 0, 100)
tdSql.query("select count(*) from stb0")
tdSql.checkData(0, 0, 10000)
tdSql.query("describe stb0")
tdSql.checkDataType(9, 1,"TIMESTAMP")
tdSql.query("select last(ts) from stb0")
tdSql.checkData(0, 0,"2021-07-01 00:00:00.990000000")
# check stable stb1, which is inserted with disordered (out-of-order) data
tdSql.query("select count (tbname) from stb1")
tdSql.checkData(0, 0, 100)
tdSql.query("select count(*) from tb1_0")
tdSql.checkData(0, 0, 100)
tdSql.query("select count(*) from stb1")
tdSql.checkData(0, 0, 10000)
# check that c8 is a nanosecond TIMESTAMP column
tdSql.query("describe stb1")
tdSql.checkDataType(9, 1,"TIMESTAMP")
# check that the insert timestamp_step is applied in nanoseconds
tdSql.query("select last(ts) from stb1")
tdSql.checkData(0, 0,"2021-07-01 00:00:00.990000000")
# insert data starting from the current time
# check stable stb0
os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json -y " % binPath)
tdSql.execute("use nsdb2")
tdSql.query("show stables")
tdSql.checkData(0, 4, 100)
tdSql.query("select count (tbname) from stb0")
tdSql.checkData(0, 0, 100)
tdSql.query("select count(*) from tb0_0")
tdSql.checkData(0, 0, 100)
tdSql.query("select count(*) from stb0")
tdSql.checkData(0, 0, 10000)
# check that c8 is a nanosecond TIMESTAMP column
tdSql.query("describe stb0")
tdSql.checkDataType(9,1,"TIMESTAMP")
# insert from csv sample files where timestamps appear both as long integers and as strings in the ts and data columns
os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json -y " % binPath)
tdSql.execute("use nsdbcsv")
tdSql.query("show stables")
tdSql.checkData(0, 4, 100)
tdSql.query("select count(*) from stb0")
tdSql.checkData(0, 0, 10000)
tdSql.query("describe stb0")
tdSql.checkDataType(3, 1, "TIMESTAMP")
tdSql.query("select count(*) from stb0 where ts > \"2021-07-01 00:00:00.490000000\"")
tdSql.checkData(0, 0, 5000)
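# 1626918583000000000 ns corresponds to about 2021-07-22, later than every inserted timestamp, so all 10000 rows of stb0 match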
tdSql.query("select count(*) from stb0 where ts < 1626918583000000000")
tdSql.checkData(0, 0, 10000)
os.system("rm -rf ./insert_res.txt")
os.system("rm -rf tools/taosdemoAllTest/taosdemoTestSupportNano*.py.sql")
# taosdemo insert test driven by command-line parameters; see taosdemo --help for details
os.system("%staosdemo -u root -P taosdata -p 6030 -a 1 -m pre -n 10 -T 20 -t 60 -o res.txt -y " % binPath)
tdSql.query("select count(*) from test.meters")
tdSql.checkData(0, 0, 600)
# check taosdemo -s
sqls_ls = ['drop database if exists nsdbsql;','create database nsdbsql precision "ns" keep 36 days 6 update 1;',
'use nsdbsql;','CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupdId int);',
'CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2);',
'INSERT INTO d1001 USING METERS TAGS ("Beijing.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32);',
'INSERT INTO d1001 USING METERS TAGS ("Beijing.Chaoyang", 2) VALUES (now, 85, 32, 0.76);']
with open("./taosdemoTestNanoCreateDB.sql",mode ="a" ) as sql_files:
for sql in sqls_ls:
sql_files.write(sql+"\n")
sql_files.close()
sleep(10)
os.system("%staosdemo -s taosdemoTestNanoCreateDB.sql -y " % binPath)
tdSql.query("select count(*) from nsdbsql.meters")
tdSql.checkData(0, 0, 2)
os.system("rm -rf ./res.txt")
os.system("rm -rf ./*.py.sql")
os.system("rm -rf ./taosdemoTestNanoCreateDB.sql")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
{
"filetype": "query",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"confirm_parameter_prompt": "no",
"databases": "nsdb",
"query_times": 10,
"query_mode": "taosc",
"specified_table_query": {
"query_interval": 1,
"concurrent": 2,
"sqls": [
{
"sql": "select count(*) from stb0 where ts>\"2021-07-01 00:01:00.000000000 \" ;",
"result": "./query_res0.txt"
},
{
"sql": "select count(*) from stb0 where ts>\"2021-07-01 00:01:00.000000000\" and ts <=\"2021-07-01 00:01:10.000000000\" ;",
"result": "./query_res1.txt"
},
{
"sql": "select count(*) from stb0 where ts>now-20d ;",
"result": "./query_res2.txt"
},
{
"sql": "select max(c10) from stb0;",
"result": "./query_res3.txt"
},
{
"sql": "select min(c1) from stb0;",
"result": "./query_res4.txt"
},
{
"sql": "select avg(c1) from stb0;",
"result": "./query_res5.txt"
},
{
"sql":"select count(*) from stb0 group by tbname;",
"result":"./query_res6.txt"
}
]
},
"super_table_query": {
"stblname": "stb0",
"query_interval": 0,
"threads": 4,
"sqls": [
{
"sql": "select count(*) from xxxx where ts>\"2021-07-01 00:01:00.000000000 \" ;",
"result": "./query_res_tb0.txt"
},
{
"sql":"select count(*) from xxxx where ts>\"2021-07-01 00:01:00.000000000\" and ts <=\"2021-07-01 00:01:10.000000000\" ;",
"result": "./query_res_tb1.txt"
},
{
"sql":"select first(*) from xxxx ;",
"result": "./query_res_tb2.txt"
},
{
"sql":"select last(*) from xxxx;",
"result": "./query_res_tb3.txt"
},
{
"sql":"select last_row(*) from xxxx ;",
"result": "./query_res_tb4.txt"
},
{
"sql":"select max(c10) from xxxx ;",
"result": "./query_res_tb5.txt"
},
{
"sql":"select min(c1) from xxxx ;",
"result": "./query_res_tb6.txt"
},
{
"sql":"select avg(c10) from xxxx ;",
"result": "./query_res_tb7.txt"
}
]
}
}
\ No newline at end of file
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root)-len("/build/bin")]
break
return buildPath
def run(self):
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosd not found!")
else:
tdLog.info("taosd found in %s" % buildPath)
binPath = buildPath+ "/build/bin/"
# query: nanosecond-precision query tests covering where filters, max/min/avg and group by
os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabase.json -y " % binPath)
tdSql.execute("use nsdb")
# use where to filter
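# rows in nsdb are spaced 10 ms apart from 2021-07-01 00:00:00.000 (100 rows per child table), so ts > .590000000 matches 40 rows per table and (.000, .590] matches 59 rows per table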
tdSql.query("select count(*) from stb0 where ts>\"2021-07-01 00:00:00.590000000 \" ")
tdSql.checkData(0, 0, 4000)
tdSql.query("select count(*) from stb0 where ts>\"2021-07-01 00:00:00.000000000\" and ts <=\"2021-07-01 00:00:00.590000000\" ")
tdSql.checkData(0, 0, 5900)
tdSql.query("select count(*) from tb0_0 where ts>\"2021-07-01 00:00:00.590000000 \" ;")
tdSql.checkData(0, 0, 40)
tdSql.query("select count(*) from tb0_0 where ts>\"2021-07-01 00:00:00.000000000\" and ts <=\"2021-07-01 00:00:00.590000000\" ")
tdSql.checkData(0, 0, 59)
# select max/min/avg on specific columns
tdSql.query("select max(c10) from stb0;")
print("select max(c10) from stb0 : " , tdSql.getData(0, 0))
tdSql.query("select max(c10) from tb0_0;")
print("select max(c10) from tb0_0 : " , tdSql.getData(0, 0))
tdSql.query("select min(c1) from stb0;")
print( "select min(c1) from stb0 : " , tdSql.getData(0, 0))
tdSql.query("select min(c1) from tb0_0;")
print( "select min(c1) from tb0_0 : " , tdSql.getData(0, 0))
tdSql.query("select avg(c1) from stb0;")
print( "select avg(c1) from stb0 : " , tdSql.getData(0, 0))
tdSql.query("select avg(c1) from tb0_0;")
print( "select avg(c1) from tb0_0 : " , tdSql.getData(0, 0))
tdSql.query("select count(*) from stb0 group by tbname;")
tdSql.checkData(0, 0, 100)
tdSql.checkData(10, 0, 100)
# query: run the above SQLs continuously via taosdemo
os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.json -y " % binPath)
os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json -y " % binPath)
tdSql.execute("use nsdbcsv")
tdSql.query("show stables")
tdSql.checkData(0, 4, 100)
tdSql.query("select count(*) from stb0")
tdSql.checkData(0, 0, 10000)
tdSql.query("describe stb0")
tdSql.checkDataType(3, 1, "TIMESTAMP")
tdSql.query("select count(*) from stb0 where ts >\"2021-07-01 00:00:00.490000000\"")
tdSql.checkData(0, 0, 5000)
tdSql.query("select count(*) from stb0 where ts <now -1d-1h-3s")
tdSql.checkData(0, 0, 10000)
tdSql.query("select count(*) from stb0 where ts < 1626918583000000000")
tdSql.checkData(0, 0, 10000)
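# c2 is the nanosecond TIMESTAMP column defined in the csv config; the execute() calls below only verify these filters run without error (results are not validated)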
tdSql.execute('select count(*) from stb0 where c2 > 162687012800000000')
tdSql.execute('select count(*) from stb0 where c2 < 162687012800000000')
tdSql.execute('select count(*) from stb0 where c2 = 162687012800000000')
tdSql.execute('select count(*) from stb0 where c2 != 162687012800000000')
tdSql.execute('select count(*) from stb0 where c2 <> 162687012800000000')
tdSql.execute('select count(*) from stb0 where c2 > "2021-07-21 20:22:08.248246976"')
tdSql.execute('select count(*) from stb0 where c2 < "2021-07-21 20:22:08.248246976"')
tdSql.execute('select count(*) from stb0 where c2 = "2021-07-21 20:22:08.248246976"')
tdSql.execute('select count(*) from stb0 where c2 != "2021-07-21 20:22:08.248246976"')
tdSql.execute('select count(*) from stb0 where c2 <> "2021-07-21 20:22:08.248246976"')
tdSql.execute('select count(*) from stb0 where ts between "2021-07-01 00:00:00.000000000" and "2021-07-01 00:00:00.990000000"')
tdSql.execute('select count(*) from stb0 where ts between 1625068800000000000 and 1625068801000000000')
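# in interval()/sliding(), 'b' denotes nanoseconds and 'u' microseconds; the data spans ~1 s at 10 ms steps, so 100 ms windows yield 10 rows, and the checks below expect intervals shorter than 1000 ns to be rejected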
tdSql.query('select avg(c0) from stb0 interval(5000000000b)')
tdSql.checkRows(1)
tdSql.query('select avg(c0) from stb0 interval(100000000b)')
tdSql.checkRows(10)
tdSql.error('select avg(c0) from stb0 interval(1b)')
tdSql.error('select avg(c0) from stb0 interval(999b)')
tdSql.query('select avg(c0) from stb0 interval(1000b)')
tdSql.checkRows(100)
tdSql.query('select avg(c0) from stb0 interval(1u)')
tdSql.checkRows(100)
tdSql.query('select avg(c0) from stb0 interval(100000000b) sliding (100000000b)')
tdSql.checkRows(10)
# query: run the above SQLs continuously via taosdemo
os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestSupportNanoQuerycsv.json -y " % binPath)
os.system("rm -rf ./query_res*.txt*")
os.system("rm -rf tools/taosdemoAllTest/*.py.sql")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
{
"filetype": "query",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"confirm_parameter_prompt": "no",
"databases": "nsdbcsv",
"query_times": 10,
"query_mode": "taosc",
"specified_table_query": {
"query_interval": 1,
"concurrent": 2,
"sqls": [
{
"sql": "select count(*) from stb0 where ts> \"2021-07-01 00:00:00.490000000\" ;",
"result": "./query_res0.txt"
},
{
"sql": "select count(*) from stb0 where ts < now -22d-1h-3s ;",
"result": "./query_res1.txt"
},
{
"sql": "select count(*) from stb0 where ts < 1626918583000000000 ;",
"result": "./query_res2.txt"
},
{
"sql": "select count(*) from stb0 where c2 <> 162687012800000000';",
"result": "./query_res3.txt"
},
{
"sql": "select count(*) from stb0 where c2 != \"2021-07-21 20:22:08.248246976\";",
"result": "./query_res4.txt"
},
{
"sql": "select count(*) from stb0 where ts between \"2021-07-01 00:00:00.000000000\" and \"2021-07-01 00:00:00.990000000\";",
"result": "./query_res5.txt"
},
{
"sql":"select count(*) from stb0 group by tbname;",
"result":"./query_res6.txt"
},
{
"sql":"select count(*) from stb0 where ts between 1625068800000000000 and 1625068801000000000;",
"result":"./query_res7.txt"
},
{
"sql":"select avg(c0) from stb0 interval(5000000000b);",
"result":"./query_res8.txt"
},
{
"sql":"select avg(c0) from stb0 interval(100000000b) sliding (100000000b);",
"result":"./query_res9.txt"
}
]
},
"super_table_query": {
"stblname": "stb0",
"query_interval": 0,
"threads": 4,
"sqls": [
{
"sql": "select count(*) from xxxx where ts > \"2021-07-01 00:00:00.490000000\" ;",
"result": "./query_res_tb0.txt"
},
{
"sql":"select count(*) from xxxx where ts between \"2021-07-01 00:00:00.000000000\" and \"2021-07-01 00:00:00.990000000\" ;",
"result": "./query_res_tb1.txt"
},
{
"sql":"select first(*) from xxxx ;",
"result": "./query_res_tb2.txt"
},
{
"sql":"select last(*) from xxxx;",
"result": "./query_res_tb3.txt"
},
{
"sql":"select last_row(*) from xxxx ;",
"result": "./query_res_tb4.txt"
},
{
"sql":"select max(c0) from xxxx ;",
"result": "./query_res_tb5.txt"
},
{
"sql":"select min(c0) from xxxx ;",
"result": "./query_res_tb6.txt"
},
{
"sql":"select avg(c0) from xxxx ;",
"result": "./query_res_tb7.txt"
},
{
"sql":"select avg(c0) from xxxx interval(100000000b) sliding (100000000b) ;",
"result": "./query_res_tb8.txt"
}
]
}
}
\ No newline at end of file