Commit 6f29dc50 authored by W wpan

Merge branch 'master' into hotfix/TD-5868

......@@ -25,15 +25,14 @@ steps:
- master
---
kind: pipeline
name: test_arm64
name: test_arm64_bionic
platform:
os: linux
arch: arm64
steps:
- name: build
image: gcc
image: arm64v8/ubuntu:bionic
commands:
- apt-get update
- apt-get install -y cmake build-essential
......@@ -48,9 +47,87 @@ steps:
branch:
- develop
- master
- 2.0
---
kind: pipeline
name: test_arm
name: test_arm64_focal
platform:
os: linux
arch: arm64
steps:
- name: build
image: arm64v8/ubuntu:focal
commands:
- echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections
- apt-get update
- apt-get install -y -qq cmake build-essential
- mkdir debug
- cd debug
- cmake .. -DCPUTYPE=aarch64 > /dev/null
- make
trigger:
event:
- pull_request
when:
branch:
- develop
- master
- 2.0
---
kind: pipeline
name: test_arm64_centos7
platform:
os: linux
arch: arm64
steps:
- name: build
image: arm64v8/centos:7
commands:
- yum install -y gcc gcc-c++ make cmake git
- mkdir debug
- cd debug
- cmake .. -DCPUTYPE=aarch64 > /dev/null
- make
trigger:
event:
- pull_request
when:
branch:
- develop
- master
- 2.0
---
kind: pipeline
name: test_arm64_centos8
platform:
os: linux
arch: arm64
steps:
- name: build
image: arm64v8/centos:8
commands:
- dnf install -y gcc gcc-c++ make cmake epel-release git libarchive
- mkdir debug
- cd debug
- cmake .. -DCPUTYPE=aarch64 > /dev/null
- make
trigger:
event:
- pull_request
when:
branch:
- develop
- master
- 2.0
---
kind: pipeline
name: test_arm_bionic
platform:
os: linux
......@@ -73,7 +150,6 @@ steps:
branch:
- develop
- master
---
kind: pipeline
name: build_trusty
......@@ -174,25 +250,3 @@ steps:
- develop
- master
---
kind: pipeline
name: goodbye
platform:
os: linux
arch: amd64
steps:
- name: 64-bit
image: alpine
commands:
- echo 64-bit is good.
when:
branch:
- develop
- master
depends_on:
- test_arm64
- test_amd64
\ No newline at end of file
......@@ -168,7 +168,7 @@ pipeline {
steps {
pre_test()
timeout(time: 45, unit: 'MINUTES'){
timeout(time: 55, unit: 'MINUTES'){
sh '''
date
cd ${WKC}/tests
......@@ -183,7 +183,7 @@ pipeline {
steps {
pre_test()
timeout(time: 45, unit: 'MINUTES'){
timeout(time: 55, unit: 'MINUTES'){
sh '''
date
cd ${WKC}/tests
......@@ -195,7 +195,7 @@ pipeline {
stage('python_3_s6') {
agent{label 'p3'}
steps {
timeout(time: 45, unit: 'MINUTES'){
timeout(time: 55, unit: 'MINUTES'){
pre_test()
sh '''
date
......@@ -208,7 +208,7 @@ pipeline {
stage('test_b1_s2') {
agent{label 'b1'}
steps {
timeout(time: 45, unit: 'MINUTES'){
timeout(time: 55, unit: 'MINUTES'){
pre_test()
sh '''
cd ${WKC}/tests
......@@ -245,7 +245,7 @@ pipeline {
./handle_taosd_val_log.sh
'''
}
timeout(time: 45, unit: 'MINUTES'){
timeout(time: 55, unit: 'MINUTES'){
sh '''
date
cd ${WKC}/tests
......@@ -269,7 +269,7 @@ pipeline {
./handle_val_log.sh
'''
}
timeout(time: 45, unit: 'MINUTES'){
timeout(time: 55, unit: 'MINUTES'){
sh '''
date
cd ${WKC}/tests
......@@ -286,7 +286,7 @@ pipeline {
stage('test_b4_s7') {
agent{label 'b4'}
steps {
timeout(time: 45, unit: 'MINUTES'){
timeout(time: 55, unit: 'MINUTES'){
pre_test()
sh '''
date
......@@ -305,7 +305,7 @@ pipeline {
stage('test_b5_s8') {
agent{label 'b5'}
steps {
timeout(time: 45, unit: 'MINUTES'){
timeout(time: 55, unit: 'MINUTES'){
pre_test()
sh '''
date
......@@ -318,7 +318,7 @@ pipeline {
stage('test_b6_s9') {
agent{label 'b6'}
steps {
timeout(time: 45, unit: 'MINUTES'){
timeout(time: 55, unit: 'MINUTES'){
pre_test()
sh '''
date
......@@ -331,7 +331,7 @@ pipeline {
stage('test_b7_s10') {
agent{label 'b7'}
steps {
timeout(time: 45, unit: 'MINUTES'){
timeout(time: 55, unit: 'MINUTES'){
pre_test()
sh '''
date
......
......@@ -35,7 +35,7 @@ fi
if [ "$pagMode" == "lite" ]; then
strip ${build_dir}/bin/taosd
strip ${build_dir}/bin/taos
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh"
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh ${script_dir}/startPre.sh"
else
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator\
${script_dir}/remove.sh ${script_dir}/set_core.sh ${script_dir}/startPre.sh ${script_dir}/taosd-dump-cfg.gdb"
......
This diff is collapsed.
This diff is collapsed.
......@@ -299,7 +299,7 @@ static int fillColumnsNull(STableDataBlocks* pBlock, int32_t rowNum) {
SSchema *schema = (SSchema*)pBlock->pTableMeta->schema;
for (int32_t i = 0; i < spd->numOfCols; ++i) {
if (!spd->cols[i].hasVal) { // current column do not have any value to insert, set it to null
if (spd->cols[i].valStat == VAL_STAT_NONE) { // current column do not have any value to insert, set it to null
for (int32_t n = 0; n < rowNum; ++n) {
char *ptr = pBlock->pData + sizeof(SSubmitBlk) + pBlock->rowSize * n + offset;
......
......@@ -2395,8 +2395,8 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) {
SColumn* x = taosArrayGetP(pNewQueryInfo->colList, index1);
tscColumnCopy(x, pCol);
} else {
SColumn *p = tscColumnClone(pCol);
taosArrayPush(pNewQueryInfo->colList, &p);
SSchema ss = {.type = (uint8_t)pCol->info.type, .bytes = pCol->info.bytes, .colId = (int16_t)pCol->columnIndex};
tscColumnListInsert(pNewQueryInfo->colList, pCol->columnIndex, pCol->tableUid, &ss);
}
}
}
......
......@@ -1776,101 +1776,6 @@ int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, i
return TSDB_CODE_SUCCESS;
}
static SMemRow tdGenMemRowFromBuilder(SMemRowBuilder* pBuilder) {
SSchema* pSchema = pBuilder->pSchema;
char* p = (char*)pBuilder->buf;
int toffset = 0;
uint16_t nCols = pBuilder->nCols;
uint8_t memRowType = payloadType(p);
uint16_t nColsBound = payloadNCols(p);
if (pBuilder->nCols <= 0 || nColsBound <= 0) {
return NULL;
}
char* pVals = POINTER_SHIFT(p, payloadValuesOffset(p));
SMemRow* memRow = (SMemRow)pBuilder->pDataBlock;
memRowSetType(memRow, memRowType);
// ----------------- Raw payload structure for row:
/* |<------------ Head ------------->|<----------- body of column data tuple ------------------->|
* | |<----------------- flen ------------->|<--- value part --->|
* |SMemRowType| dataTLen | nCols | colId | colType | offset | ... | value |...|...|... |
* +-----------+----------+----------+--------------------------------------|--------------------|
* | uint8_t | uint32_t | uint16_t | int16_t | uint8_t | uint16_t | ... |.......|...|...|... |
* +-----------+----------+----------+--------------------------------------+--------------------|
* 1. offset in column data tuple starts from the value part in case of uint16_t overflow.
* 2. dataTLen: total length including the header and body.
*/
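/* Worked example (added for illustration, not part of the original comment): with the
 * field widths above, the fixed head takes 1 + 4 + 2 = 7 bytes and each per-column
 * header takes 2 + 1 + 2 = 5 bytes, so for a 3-column payload the value part begins
 * at byte 7 + 3 * 5 = 22 and every per-column offset is counted from that point. */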
if (memRowType == SMEM_ROW_DATA) {
SDataRow trow = (SDataRow)memRowDataBody(memRow);
dataRowSetLen(trow, (TDRowLenT)(TD_DATA_ROW_HEAD_SIZE + pBuilder->flen));
dataRowSetVersion(trow, pBuilder->sversion);
p = (char*)payloadBody(pBuilder->buf);
uint16_t i = 0, j = 0;
while (j < nCols) {
if (i >= nColsBound) {
break;
}
int16_t colId = payloadColId(p);
if (colId == pSchema[j].colId) {
// ASSERT(payloadColType(p) == pSchema[j].type);
tdAppendColVal(trow, POINTER_SHIFT(pVals, payloadColOffset(p)), pSchema[j].type, toffset);
toffset += TYPE_BYTES[pSchema[j].type];
p = payloadNextCol(p);
++i;
++j;
} else if (colId < pSchema[j].colId) {
p = payloadNextCol(p);
++i;
} else {
tdAppendColVal(trow, getNullValue(pSchema[j].type), pSchema[j].type, toffset);
toffset += TYPE_BYTES[pSchema[j].type];
++j;
}
}
while (j < nCols) {
tdAppendColVal(trow, getNullValue(pSchema[j].type), pSchema[j].type, toffset);
toffset += TYPE_BYTES[pSchema[j].type];
++j;
}
#if 0 // no need anymore
while (i < nColsBound) {
p = payloadNextCol(p);
++i;
}
#endif
} else if (memRowType == SMEM_ROW_KV) {
SKVRow kvRow = (SKVRow)memRowKvBody(memRow);
kvRowSetLen(kvRow, (TDRowLenT)(TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * nColsBound));
kvRowSetNCols(kvRow, nColsBound);
memRowSetKvVersion(memRow, pBuilder->sversion);
p = (char*)payloadBody(pBuilder->buf);
int i = 0;
while (i < nColsBound) {
int16_t colId = payloadColId(p);
uint8_t colType = payloadColType(p);
tdAppendKvColVal(kvRow, POINTER_SHIFT(pVals,payloadColOffset(p)), colId, colType, &toffset);
//toffset += sizeof(SColIdx);
p = payloadNextCol(p);
++i;
}
} else {
ASSERT(0);
}
int32_t rowTLen = memRowTLen(memRow);
pBuilder->pDataBlock = (char*)pBuilder->pDataBlock + rowTLen; // next row
pBuilder->pSubmitBlk->dataLen += rowTLen;
return memRow;
}
// Erase the empty space reserved for binary data
static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, SInsertStatementParam* insertParam,
SBlockKeyTuple* blkKeyTuple) {
......@@ -1902,10 +1807,11 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, SI
int32_t schemaSize = sizeof(STColumn) * numOfCols;
pBlock->schemaLen = schemaSize;
} else {
for (int32_t j = 0; j < tinfo.numOfColumns; ++j) {
flen += TYPE_BYTES[pSchema[j].type];
if (IS_RAW_PAYLOAD(insertParam->payloadType)) {
for (int32_t j = 0; j < tinfo.numOfColumns; ++j) {
flen += TYPE_BYTES[pSchema[j].type];
}
}
pBlock->schemaLen = 0;
}
......@@ -1932,18 +1838,19 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, SI
pBlock->dataLen += memRowTLen(memRow);
}
} else {
SMemRowBuilder rowBuilder;
rowBuilder.pSchema = pSchema;
rowBuilder.sversion = pTableMeta->sversion;
rowBuilder.flen = flen;
rowBuilder.nCols = tinfo.numOfColumns;
rowBuilder.pDataBlock = pDataBlock;
rowBuilder.pSubmitBlk = pBlock;
rowBuilder.buf = p;
for (int32_t i = 0; i < numOfRows; ++i) {
rowBuilder.buf = (blkKeyTuple + i)->payloadAddr;
tdGenMemRowFromBuilder(&rowBuilder);
char* payload = (blkKeyTuple + i)->payloadAddr;
if (isNeedConvertRow(payload)) {
convertSMemRow(pDataBlock, payload, pTableDataBlock);
TDRowTLenT rowTLen = memRowTLen(pDataBlock);
pDataBlock = POINTER_SHIFT(pDataBlock, rowTLen);
pBlock->dataLen += rowTLen;
} else {
TDRowTLenT rowTLen = memRowTLen(payload);
memcpy(pDataBlock, payload, rowTLen);
pDataBlock = POINTER_SHIFT(pDataBlock, rowTLen);
pBlock->dataLen += rowTLen;
}
}
}
......@@ -1956,9 +1863,9 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, SI
static int32_t getRowExpandSize(STableMeta* pTableMeta) {
int32_t result = TD_MEM_ROW_DATA_HEAD_SIZE;
int32_t columns = tscGetNumOfColumns(pTableMeta);
int32_t columns = tscGetNumOfColumns(pTableMeta);
SSchema* pSchema = tscGetTableSchema(pTableMeta);
for(int32_t i = 0; i < columns; i++) {
for (int32_t i = 0; i < columns; i++) {
if (IS_VAR_DATA_TYPE((pSchema + i)->type)) {
result += TYPE_BYTES[TSDB_DATA_TYPE_BINARY];
}
......@@ -2004,7 +1911,7 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl
SSubmitBlk* pBlocks = (SSubmitBlk*) pOneTableBlock->pData;
if (pBlocks->numOfRows > 0) {
// the maximum expanded size in byte when a row-wise data is converted to SDataRow format
int32_t expandSize = getRowExpandSize(pOneTableBlock->pTableMeta);
int32_t expandSize = isRawPayload ? getRowExpandSize(pOneTableBlock->pTableMeta) : 0;
STableDataBlocks* dataBuf = NULL;
int32_t ret = tscGetDataBlockFromList(pVnodeDataBlockHashList, pOneTableBlock->vgId, TSDB_PAYLOAD_SIZE,
......@@ -2017,7 +1924,8 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl
return ret;
}
int64_t destSize = dataBuf->size + pOneTableBlock->size + pBlocks->numOfRows * expandSize + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
int64_t destSize = dataBuf->size + pOneTableBlock->size + pBlocks->numOfRows * expandSize +
sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
if (dataBuf->nAllocSize < destSize) {
dataBuf->nAllocSize = (uint32_t)(destSize * 1.5);
......@@ -2061,7 +1969,9 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl
pBlocks->numOfRows, pBlocks->sversion, blkKeyInfo.pKeyTuple->skey, pLastKeyTuple->skey);
}
int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize) + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
int32_t len = pBlocks->numOfRows *
(isRawPayload ? (pOneTableBlock->rowSize + expandSize) : getExtendedRowSize(pOneTableBlock)) +
sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
pBlocks->tid = htonl(pBlocks->tid);
pBlocks->uid = htobe64(pBlocks->uid);
......
......@@ -186,6 +186,7 @@ typedef void *SDataRow;
#define TD_DATA_ROW_HEAD_SIZE (sizeof(uint16_t) + sizeof(int16_t))
#define dataRowLen(r) (*(TDRowLenT *)(r)) // 0~65535
#define dataRowEnd(r) POINTER_SHIFT(r, dataRowLen(r))
#define dataRowVersion(r) (*(int16_t *)POINTER_SHIFT(r, sizeof(int16_t)))
#define dataRowTuple(r) POINTER_SHIFT(r, TD_DATA_ROW_HEAD_SIZE)
#define dataRowTKey(r) (*(TKEY *)(dataRowTuple(r)))
......@@ -201,14 +202,18 @@ void tdFreeDataRow(SDataRow row);
void tdInitDataRow(SDataRow row, STSchema *pSchema);
SDataRow tdDataRowDup(SDataRow row);
// offset here not include dataRow header length
static FORCE_INLINE int tdAppendColVal(SDataRow row, const void *value, int8_t type, int32_t offset) {
static FORCE_INLINE int tdAppendDataColVal(SDataRow row, const void *value, bool isCopyVarData, int8_t type,
int32_t offset) {
ASSERT(value != NULL);
int32_t toffset = offset + TD_DATA_ROW_HEAD_SIZE;
if (IS_VAR_DATA_TYPE(type)) {
*(VarDataOffsetT *)POINTER_SHIFT(row, toffset) = dataRowLen(row);
memcpy(POINTER_SHIFT(row, dataRowLen(row)), value, varDataTLen(value));
if (isCopyVarData) {
memcpy(POINTER_SHIFT(row, dataRowLen(row)), value, varDataTLen(value));
}
dataRowLen(row) += varDataTLen(value);
} else {
if (offset == 0) {
......@@ -223,6 +228,12 @@ static FORCE_INLINE int tdAppendColVal(SDataRow row, const void *value, int8_t t
return 0;
}
// offset here not include dataRow header length
static FORCE_INLINE int tdAppendColVal(SDataRow row, const void *value, int8_t type, int32_t offset) {
return tdAppendDataColVal(row, value, true, type, offset);
}
// NOTE: offset here including the header size
static FORCE_INLINE void *tdGetRowDataOfCol(SDataRow row, int8_t type, int32_t offset) {
if (IS_VAR_DATA_TYPE(type)) {
......@@ -328,11 +339,10 @@ static FORCE_INLINE void dataColReset(SDataCol *pDataCol) { pDataCol->len = 0; }
int tdAllocMemForCol(SDataCol *pCol, int maxPoints);
void dataColInit(SDataCol *pDataCol, STColumn *pCol, int maxPoints);
void dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPoints);
int dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPoints);
void dataColSetOffset(SDataCol *pCol, int nEle);
bool isNEleNull(SDataCol *pCol, int nEle);
void dataColSetNEleNull(SDataCol *pCol, int nEle, int maxPoints);
// Get the data pointer from a column-wised data
static FORCE_INLINE const void *tdGetColDataOfRow(SDataCol *pCol, int row) {
......@@ -357,13 +367,11 @@ static FORCE_INLINE int32_t dataColGetNEleLen(SDataCol *pDataCol, int rows) {
}
typedef struct {
int maxRowSize;
int maxCols; // max number of columns
int maxPoints; // max number of points
int numOfRows;
int numOfCols; // Total number of cols
int sversion; // TODO: set sversion
int maxCols; // max number of columns
int maxPoints; // max number of points
int numOfRows;
int numOfCols; // Total number of cols
int sversion; // TODO: set sversion
SDataCol *cols;
} SDataCols;
......@@ -407,7 +415,7 @@ static FORCE_INLINE TSKEY dataColsKeyLast(SDataCols *pCols) {
}
}
SDataCols *tdNewDataCols(int maxRowSize, int maxCols, int maxRows);
SDataCols *tdNewDataCols(int maxCols, int maxRows);
void tdResetDataCols(SDataCols *pCols);
int tdInitDataCols(SDataCols *pCols, STSchema *pSchema);
SDataCols *tdDupDataCols(SDataCols *pCols, bool keepData);
......@@ -475,9 +483,10 @@ static FORCE_INLINE void *tdGetKVRowIdxOfCol(SKVRow row, int16_t colId) {
}
// offset here not include kvRow header length
static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, int16_t colId, int8_t type, int32_t *offset) {
static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, bool isCopyValData, int16_t colId, int8_t type,
int32_t offset) {
ASSERT(value != NULL);
int32_t toffset = *offset + TD_KV_ROW_HEAD_SIZE;
int32_t toffset = offset + TD_KV_ROW_HEAD_SIZE;
SColIdx *pColIdx = (SColIdx *)POINTER_SHIFT(row, toffset);
char * ptr = (char *)POINTER_SHIFT(row, kvRowLen(row));
......@@ -485,10 +494,12 @@ static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, int16_t
pColIdx->offset = kvRowLen(row); // offset of pColIdx including the TD_KV_ROW_HEAD_SIZE
if (IS_VAR_DATA_TYPE(type)) {
memcpy(ptr, value, varDataTLen(value));
if (isCopyValData) {
memcpy(ptr, value, varDataTLen(value));
}
kvRowLen(row) += varDataTLen(value);
} else {
if (*offset == 0) {
if (offset == 0) {
ASSERT(type == TSDB_DATA_TYPE_TIMESTAMP);
TKEY tvalue = tdGetTKEY(*(TSKEY *)value);
memcpy(ptr, (void *)(&tvalue), TYPE_BYTES[type]);
......@@ -497,7 +508,6 @@ static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, int16_t
}
kvRowLen(row) += TYPE_BYTES[type];
}
*offset += sizeof(SColIdx);
return 0;
}
......@@ -592,12 +602,24 @@ typedef void *SMemRow;
#define TD_MEM_ROW_DATA_HEAD_SIZE (TD_MEM_ROW_TYPE_SIZE + TD_DATA_ROW_HEAD_SIZE)
#define TD_MEM_ROW_KV_HEAD_SIZE (TD_MEM_ROW_TYPE_SIZE + TD_MEM_ROW_KV_VER_SIZE + TD_KV_ROW_HEAD_SIZE)
#define SMEM_ROW_DATA 0U // SDataRow
#define SMEM_ROW_KV 1U // SKVRow
#define SMEM_ROW_DATA 0x0U // SDataRow
#define SMEM_ROW_KV 0x01U // SKVRow
#define SMEM_ROW_CONVERT 0x80U // SMemRow convert flag
#define KVRatioKV (0.2f) // all bool
#define KVRatioPredict (0.4f)
#define KVRatioData (0.75f) // all bigint
#define KVRatioConvert (0.9f)
#define memRowType(r) (*(uint8_t *)(r))
#define memRowType(r) ((*(uint8_t *)(r)) & 0x01)
#define memRowSetType(r, t) ((*(uint8_t *)(r)) = (t)) // set the total byte in case of dirty memory
#define memRowSetConvert(r) ((*(uint8_t *)(r)) = (((*(uint8_t *)(r)) & 0x7F) | SMEM_ROW_CONVERT)) // highest bit
#define isDataRowT(t) (SMEM_ROW_DATA == (((uint8_t)(t)) & 0x01))
#define isDataRow(r) (SMEM_ROW_DATA == memRowType(r))
#define isKvRowT(t) (SMEM_ROW_KV == (((uint8_t)(t)) & 0x01))
#define isKvRow(r) (SMEM_ROW_KV == memRowType(r))
#define isNeedConvertRow(r) (((*(uint8_t *)(r)) & 0x80) == SMEM_ROW_CONVERT)
#define memRowDataBody(r) POINTER_SHIFT(r, TD_MEM_ROW_TYPE_SIZE) // section after flag
#define memRowKvBody(r) \
......@@ -614,6 +636,14 @@ typedef void *SMemRow;
#define memRowLen(r) (isDataRow(r) ? memRowDataLen(r) : memRowKvLen(r))
#define memRowTLen(r) (isDataRow(r) ? memRowDataTLen(r) : memRowKvTLen(r)) // using uint32_t/int32_t to store the TLen
static FORCE_INLINE char *memRowEnd(SMemRow row) {
if (isDataRow(row)) {
return (char *)dataRowEnd(memRowDataBody(row));
} else {
return (char *)kvRowEnd(memRowKvBody(row));
}
}
#define memRowDataVersion(r) dataRowVersion(memRowDataBody(r))
#define memRowKvVersion(r) (*(int16_t *)POINTER_SHIFT(r, TD_MEM_ROW_TYPE_SIZE))
#define memRowVersion(r) (isDataRow(r) ? memRowDataVersion(r) : memRowKvVersion(r)) // schema version
......@@ -631,7 +661,6 @@ typedef void *SMemRow;
} \
} while (0)
#define memRowSetType(r, t) (memRowType(r) = (t))
#define memRowSetLen(r, l) (isDataRow(r) ? memRowDataLen(r) = (l) : memRowKvLen(r) = (l))
#define memRowSetVersion(r, v) (isDataRow(r) ? dataRowSetVersion(memRowDataBody(r), v) : memRowSetKvVersion(r, v))
#define memRowCpy(dst, r) memcpy((dst), (r), memRowTLen(r))
......@@ -664,12 +693,12 @@ static FORCE_INLINE void *tdGetMemRowDataOfColEx(void *row, int16_t colId, int8_
}
}
static FORCE_INLINE int tdAppendMemColVal(SMemRow row, const void *value, int16_t colId, int8_t type, int32_t offset,
int32_t *kvOffset) {
static FORCE_INLINE int tdAppendMemRowColVal(SMemRow row, const void *value, bool isCopyVarData, int16_t colId,
int8_t type, int32_t offset) {
if (isDataRow(row)) {
tdAppendColVal(memRowDataBody(row), value, type, offset);
tdAppendDataColVal(memRowDataBody(row), value, isCopyVarData, type, offset);
} else {
tdAppendKvColVal(memRowKvBody(row), value, colId, type, kvOffset);
tdAppendKvColVal(memRowKvBody(row), value, isCopyVarData, colId, type, offset);
}
return 0;
}
......@@ -691,6 +720,30 @@ static FORCE_INLINE int32_t tdGetColAppendLen(uint8_t rowType, const void *value
return len;
}
/**
* 1. calculate the delta of AllNullLen for SDataRow.
* 2. calculate the real len for SKVRow.
*/
static FORCE_INLINE void tdGetColAppendDeltaLen(const void *value, int8_t colType, int32_t *dataLen, int32_t *kvLen) {
switch (colType) {
case TSDB_DATA_TYPE_BINARY: {
int32_t varLen = varDataLen(value);
*dataLen += (varLen - CHAR_BYTES);
*kvLen += (varLen + sizeof(SColIdx));
break;
}
case TSDB_DATA_TYPE_NCHAR: {
int32_t varLen = varDataLen(value);
*dataLen += (varLen - TSDB_NCHAR_SIZE);
*kvLen += (varLen + sizeof(SColIdx));
break;
}
default: {
*kvLen += (TYPE_BYTES[colType] + sizeof(SColIdx));
break;
}
}
}
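/* Worked example (illustration only): for a BINARY value whose varDataLen() is 10,
 * *dataLen grows by 10 - CHAR_BYTES and *kvLen grows by 10 + sizeof(SColIdx); for a
 * fixed-width column only *kvLen grows, by TYPE_BYTES[colType] + sizeof(SColIdx),
 * exactly as the switch above computes. */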
typedef struct {
int16_t colId;
......@@ -706,7 +759,7 @@ static FORCE_INLINE void setSColInfo(SColInfo* colInfo, int16_t colId, uint8_t c
SMemRow mergeTwoMemRows(void *buffer, SMemRow row1, SMemRow row2, STSchema *pSchema1, STSchema *pSchema2);
#if 0
// ----------------- Raw payload structure for row:
/* |<------------ Head ------------->|<----------- body of column data tuple ------------------->|
* | |<----------------- flen ------------->|<--- value part --->|
......@@ -752,6 +805,8 @@ SMemRow mergeTwoMemRows(void *buffer, SMemRow row1, SMemRow row2, STSchema *pSch
static FORCE_INLINE char *payloadNextCol(char *pCol) { return (char *)POINTER_SHIFT(pCol, PAYLOAD_COL_HEAD_LEN); }
#endif
#ifdef __cplusplus
}
#endif
......
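A standalone sketch of the flag-byte semantics introduced above; only the numeric values 0x0 / 0x01 / 0x80 are taken from the SMEM_ROW_* macros, the helper names are illustrative:
#include <stdbool.h>
#include <stdint.h>

static uint8_t row_type(uint8_t flag)      { return flag & 0x01U; }            /* mirrors memRowType       */
static bool    needs_convert(uint8_t flag) { return (flag & 0x80U) == 0x80U; } /* mirrors isNeedConvertRow */

static void flag_demo(void) {
    uint8_t flag = 0x01U;          /* memRowSetType(r, SMEM_ROW_KV)                    */
    flag = (flag & 0x7FU) | 0x80U; /* memRowSetConvert: keep the low 7 bits, set bit 7 */
    /* here row_type(flag) == 0x01 (still a KV row) and needs_convert(flag) is true;
     * memRowSetType(r, SMEM_ROW_DATA) would rewrite the whole byte to 0x00, which
     * clears the convert mark as well. */
    (void)row_type(flag);
    (void)needs_convert(flag);
}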
......@@ -19,10 +19,10 @@
#include "wchar.h"
#include "tarray.h"
static void dataColSetNEleNull(SDataCol *pCol, int nEle);
static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, int limit1, SDataCols *src2, int *iter2,
int limit2, int tRows, bool forceSetNull);
//TODO: change caller to use return val
int tdAllocMemForCol(SDataCol *pCol, int maxPoints) {
int spaceNeeded = pCol->bytes * maxPoints;
if(IS_VAR_DATA_TYPE(pCol->type)) {
......@@ -31,7 +31,7 @@ int tdAllocMemForCol(SDataCol *pCol, int maxPoints) {
if(pCol->spaceSize < spaceNeeded) {
void* ptr = realloc(pCol->pData, spaceNeeded);
if(ptr == NULL) {
uDebug("malloc failure, size:%" PRId64 " failed, reason:%s", (int64_t)pCol->spaceSize,
uDebug("malloc failure, size:%" PRId64 " failed, reason:%s", (int64_t)spaceNeeded,
strerror(errno));
return -1;
} else {
......@@ -239,20 +239,19 @@ void dataColInit(SDataCol *pDataCol, STColumn *pCol, int maxPoints) {
pDataCol->len = 0;
}
// value from timestamp should be TKEY here instead of TSKEY
void dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPoints) {
int dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPoints) {
ASSERT(pCol != NULL && value != NULL);
if (isAllRowsNull(pCol)) {
if (isNull(value, pCol->type)) {
// all null value yet, just return
return;
return 0;
}
if(tdAllocMemForCol(pCol, maxPoints) < 0) return -1;
if (numOfRows > 0) {
// Find the first not null value, fill all previouse values as NULL
dataColSetNEleNull(pCol, numOfRows, maxPoints);
} else {
tdAllocMemForCol(pCol, maxPoints);
dataColSetNEleNull(pCol, numOfRows);
}
}
......@@ -268,12 +267,21 @@ void dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxP
memcpy(POINTER_SHIFT(pCol->pData, pCol->len), value, pCol->bytes);
pCol->len += pCol->bytes;
}
return 0;
}
static FORCE_INLINE const void *tdGetColDataOfRowUnsafe(SDataCol *pCol, int row) {
if (IS_VAR_DATA_TYPE(pCol->type)) {
return POINTER_SHIFT(pCol->pData, pCol->dataOff[row]);
} else {
return POINTER_SHIFT(pCol->pData, TYPE_BYTES[pCol->type] * row);
}
}
bool isNEleNull(SDataCol *pCol, int nEle) {
if(isAllRowsNull(pCol)) return true;
for (int i = 0; i < nEle; i++) {
if (!isNull(tdGetColDataOfRow(pCol, i), pCol->type)) return false;
if (!isNull(tdGetColDataOfRowUnsafe(pCol, i), pCol->type)) return false;
}
return true;
}
......@@ -290,9 +298,7 @@ static FORCE_INLINE void dataColSetNullAt(SDataCol *pCol, int index) {
}
}
void dataColSetNEleNull(SDataCol *pCol, int nEle, int maxPoints) {
tdAllocMemForCol(pCol, maxPoints);
static void dataColSetNEleNull(SDataCol *pCol, int nEle) {
if (IS_VAR_DATA_TYPE(pCol->type)) {
pCol->len = 0;
for (int i = 0; i < nEle; i++) {
......@@ -318,7 +324,7 @@ void dataColSetOffset(SDataCol *pCol, int nEle) {
}
}
SDataCols *tdNewDataCols(int maxRowSize, int maxCols, int maxRows) {
SDataCols *tdNewDataCols(int maxCols, int maxRows) {
SDataCols *pCols = (SDataCols *)calloc(1, sizeof(SDataCols));
if (pCols == NULL) {
uDebug("malloc failure, size:%" PRId64 " failed, reason:%s", (int64_t)sizeof(SDataCols), strerror(errno));
......@@ -326,6 +332,9 @@ SDataCols *tdNewDataCols(int maxRowSize, int maxCols, int maxRows) {
}
pCols->maxPoints = maxRows;
pCols->maxCols = maxCols;
pCols->numOfRows = 0;
pCols->numOfCols = 0;
if (maxCols > 0) {
pCols->cols = (SDataCol *)calloc(maxCols, sizeof(SDataCol));
......@@ -342,13 +351,8 @@ SDataCols *tdNewDataCols(int maxRowSize, int maxCols, int maxRows) {
pCols->cols[i].pData = NULL;
pCols->cols[i].dataOff = NULL;
}
pCols->maxCols = maxCols;
}
pCols->maxRowSize = maxRowSize;
return pCols;
}
......@@ -357,8 +361,9 @@ int tdInitDataCols(SDataCols *pCols, STSchema *pSchema) {
int oldMaxCols = pCols->maxCols;
if (schemaNCols(pSchema) > oldMaxCols) {
pCols->maxCols = schemaNCols(pSchema);
pCols->cols = (SDataCol *)realloc(pCols->cols, sizeof(SDataCol) * pCols->maxCols);
if (pCols->cols == NULL) return -1;
void* ptr = (SDataCol *)realloc(pCols->cols, sizeof(SDataCol) * pCols->maxCols);
if (ptr == NULL) return -1;
pCols->cols = ptr;
for(i = oldMaxCols; i < pCols->maxCols; i++) {
pCols->cols[i].pData = NULL;
pCols->cols[i].dataOff = NULL;
......@@ -366,10 +371,6 @@ int tdInitDataCols(SDataCols *pCols, STSchema *pSchema) {
}
}
if (schemaTLen(pSchema) > pCols->maxRowSize) {
pCols->maxRowSize = schemaTLen(pSchema);
}
tdResetDataCols(pCols);
pCols->numOfCols = schemaNCols(pSchema);
......@@ -398,7 +399,7 @@ SDataCols *tdFreeDataCols(SDataCols *pCols) {
}
SDataCols *tdDupDataCols(SDataCols *pDataCols, bool keepData) {
SDataCols *pRet = tdNewDataCols(pDataCols->maxRowSize, pDataCols->maxCols, pDataCols->maxPoints);
SDataCols *pRet = tdNewDataCols(pDataCols->maxCols, pDataCols->maxPoints);
if (pRet == NULL) return NULL;
pRet->numOfCols = pDataCols->numOfCols;
......@@ -413,7 +414,10 @@ SDataCols *tdDupDataCols(SDataCols *pDataCols, bool keepData) {
if (keepData) {
if (pDataCols->cols[i].len > 0) {
tdAllocMemForCol(&pRet->cols[i], pRet->maxPoints);
if(tdAllocMemForCol(&pRet->cols[i], pRet->maxPoints) < 0) {
tdFreeDataCols(pRet);
return NULL;
}
pRet->cols[i].len = pDataCols->cols[i].len;
memcpy(pRet->cols[i].pData, pDataCols->cols[i].pData, pDataCols->cols[i].len);
if (IS_VAR_DATA_TYPE(pRet->cols[i].type)) {
......@@ -584,9 +588,12 @@ static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, i
if ((key1 > key2) || (key1 == key2 && !TKEY_IS_DELETED(tkey2))) {
for (int i = 0; i < src2->numOfCols; i++) {
ASSERT(target->cols[i].type == src2->cols[i].type);
if (src2->cols[i].len > 0 && (forceSetNull || (!forceSetNull && !isNull(src2->cols[i].pData, src2->cols[i].type)))) {
if (src2->cols[i].len > 0 && !isNull(src2->cols[i].pData, src2->cols[i].type)) {
dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src2->cols + i, *iter2), target->numOfRows,
target->maxPoints);
} else if(!forceSetNull && key1 == key2 && src1->cols[i].len > 0) {
dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src1->cols + i, *iter1), target->numOfRows,
target->maxPoints);
}
}
target->numOfRows++;
......@@ -844,7 +851,8 @@ SMemRow mergeTwoMemRows(void *buffer, SMemRow row1, SMemRow row2, STSchema *pSch
int16_t k;
for (k = 0; k < nKvNCols; ++k) {
SColInfo *pColInfo = taosArrayGet(stashRow, k);
tdAppendKvColVal(kvRow, pColInfo->colVal, pColInfo->colId, pColInfo->colType, &toffset);
tdAppendKvColVal(kvRow, pColInfo->colVal, true, pColInfo->colId, pColInfo->colType, toffset);
toffset += sizeof(SColIdx);
}
ASSERT(kvLen == memRowTLen(tRow));
}
......
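dataColAppendVal now reports allocation failure through its int return value (tdAllocMemForCol already returns -1 on failure), so callers are expected to check it; the tsdb commit code further down still carries TODO notes to that effect. A minimal caller-side sketch, with a hypothetical helper name and assuming the SDataCol/SDataCols definitions from tdataformat.h:
/* Hypothetical caller: stop as soon as one column append fails instead of
 * silently continuing with a half-written row. */
static int appendRowToCols(SDataCols *pCols, const void **colValues) {
    for (int i = 0; i < pCols->numOfCols; i++) {
        if (dataColAppendVal(&pCols->cols[i], colValues[i], pCols->numOfRows, pCols->maxPoints) < 0) {
            return -1; /* allocation failed inside tdAllocMemForCol */
        }
    }
    pCols->numOfRows++;
    return 0;
}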
......@@ -113,7 +113,6 @@
</includes>
<excludes>
<exclude>**/AppMemoryLeakTest.java</exclude>
<exclude>**/AuthenticationTest.java</exclude>
<exclude>**/ConnectMultiTaosdByRestfulWithDifferentTokenTest.java</exclude>
<exclude>**/DatetimeBefore1970Test.java</exclude>
<exclude>**/FailOverTest.java</exclude>
......
......@@ -14,6 +14,8 @@
*****************************************************************************/
package com.taosdata.jdbc;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.sql.*;
import java.util.*;
import java.util.logging.Logger;
......@@ -127,6 +129,11 @@ public class TSDBDriver extends AbstractDriver {
return null;
}
if (!props.containsKey(TSDBDriver.PROPERTY_KEY_USER))
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_USER_IS_REQUIRED);
if (!props.containsKey(TSDBDriver.PROPERTY_KEY_PASSWORD))
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PASSWORD_IS_REQUIRED);
try {
TSDBJNIConnector.init((String) props.get(PROPERTY_KEY_CONFIG_DIR), (String) props.get(PROPERTY_KEY_LOCALE),
(String) props.get(PROPERTY_KEY_CHARSET), (String) props.get(PROPERTY_KEY_TIME_ZONE));
......
......@@ -33,6 +33,8 @@ public class TSDBError {
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_NUMERIC_VALUE_OUT_OF_RANGE, "numeric value out of range");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE, "unknown taos type in tdengine");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN_TIMESTAMP_PRECISION, "unknown timestamp precision");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_USER_IS_REQUIRED, "user is required");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_PASSWORD_IS_REQUIRED, "password is required");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN, "unknown error");
......
......@@ -29,6 +29,9 @@ public class TSDBErrorNumbers {
public static final int ERROR_UNKNOWN_TIMESTAMP_PRECISION = 0x2316; // unknown timestamp precision
public static final int ERROR_RESTFul_Client_Protocol_Exception = 0x2317;
public static final int ERROR_RESTFul_Client_IOException = 0x2318;
public static final int ERROR_USER_IS_REQUIRED = 0x2319; // user is required
public static final int ERROR_PASSWORD_IS_REQUIRED = 0x231a; // password is required
public static final int ERROR_UNKNOWN = 0x2350; //unknown error
......@@ -67,6 +70,8 @@ public class TSDBErrorNumbers {
errorNumbers.add(ERROR_UNKNOWN_TAOS_TYPE);
errorNumbers.add(ERROR_UNKNOWN_TIMESTAMP_PRECISION);
errorNumbers.add(ERROR_RESTFul_Client_IOException);
errorNumbers.add(ERROR_USER_IS_REQUIRED);
errorNumbers.add(ERROR_PASSWORD_IS_REQUIRED);
errorNumbers.add(ERROR_RESTFul_Client_Protocol_Exception);
......
......@@ -36,7 +36,6 @@ public class TSDBJNIConnector {
static {
System.loadLibrary("taos");
System.out.println("java.library.path:" + System.getProperty("java.library.path"));
}
public boolean isClosed() {
......
......@@ -7,6 +7,7 @@ import com.taosdata.jdbc.utils.HttpClientPoolUtil;
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.sql.*;
import java.util.Properties;
import java.util.logging.Logger;
......@@ -40,8 +41,13 @@ public class RestfulDriver extends AbstractDriver {
String loginUrl = "http://" + host + ":" + port + "/rest/login/" + props.getProperty(TSDBDriver.PROPERTY_KEY_USER) + "/" + props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD) + "";
try {
String user = URLEncoder.encode(props.getProperty(TSDBDriver.PROPERTY_KEY_USER), "UTF-8");
String password = URLEncoder.encode(props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD), "UTF-8");
if (!props.containsKey(TSDBDriver.PROPERTY_KEY_USER))
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_USER_IS_REQUIRED);
if (!props.containsKey(TSDBDriver.PROPERTY_KEY_PASSWORD))
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PASSWORD_IS_REQUIRED);
String user = URLEncoder.encode(props.getProperty(TSDBDriver.PROPERTY_KEY_USER), StandardCharsets.UTF_8.displayName());
String password = URLEncoder.encode(props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD), StandardCharsets.UTF_8.displayName());
loginUrl = "http://" + props.getProperty(TSDBDriver.PROPERTY_KEY_HOST) + ":" + props.getProperty(TSDBDriver.PROPERTY_KEY_PORT) + "/rest/login/" + user + "/" + password + "";
} catch (UnsupportedEncodingException e) {
e.printStackTrace();
......
......@@ -7,6 +7,7 @@ import com.taosdata.jdbc.AbstractStatement;
import com.taosdata.jdbc.TSDBDriver;
import com.taosdata.jdbc.TSDBError;
import com.taosdata.jdbc.TSDBErrorNumbers;
import com.taosdata.jdbc.enums.TimestampFormat;
import com.taosdata.jdbc.utils.HttpClientPoolUtil;
import com.taosdata.jdbc.utils.SqlSyntaxValidator;
......@@ -45,9 +46,7 @@ public class RestfulStatement extends AbstractStatement {
if (!SqlSyntaxValidator.isValidForExecuteUpdate(sql))
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_UPDATE, "not a valid sql for executeUpdate: " + sql);
final String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql";
return executeOneUpdate(url, sql);
return executeOneUpdate(sql);
}
@Override
......@@ -62,34 +61,25 @@ public class RestfulStatement extends AbstractStatement {
public boolean execute(String sql) throws SQLException {
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
if (!SqlSyntaxValidator.isValidForExecute(sql))
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE, "not a valid sql for execute: " + sql);
// if a `use` statement was executed, set the current Statement's catalog to the new database
boolean result = true;
String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql";
if (conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT).equals("TIMESTAMP")) {
url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlt";
}
if (conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT).equals("UTC")) {
url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlutc";
}
if (SqlSyntaxValidator.isUseSql(sql)) {
HttpClientPoolUtil.execute(url, sql, this.conn.getToken());
HttpClientPoolUtil.execute(getUrl(), sql, this.conn.getToken());
this.database = sql.trim().replace("use", "").trim();
this.conn.setCatalog(this.database);
result = false;
} else if (SqlSyntaxValidator.isDatabaseUnspecifiedQuery(sql)) {
executeOneQuery(sql);
} else if (SqlSyntaxValidator.isDatabaseUnspecifiedUpdate(sql)) {
executeOneUpdate(url, sql);
executeOneUpdate(sql);
result = false;
} else {
if (SqlSyntaxValidator.isValidForExecuteQuery(sql)) {
executeQuery(sql);
executeOneQuery(sql);
} else {
executeUpdate(sql);
executeOneUpdate(sql);
result = false;
}
}
......@@ -97,19 +87,25 @@ public class RestfulStatement extends AbstractStatement {
return result;
}
private ResultSet executeOneQuery(String sql) throws SQLException {
if (!SqlSyntaxValidator.isValidForExecuteQuery(sql))
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_QUERY, "not a valid sql for executeQuery: " + sql);
private String getUrl() throws SQLException {
TimestampFormat timestampFormat = TimestampFormat.valueOf(conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT).trim().toUpperCase());
String url;
switch (timestampFormat) {
case TIMESTAMP:
url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlt";
break;
case UTC:
url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlutc";
break;
default:
url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql";
}
return url;
}
private ResultSet executeOneQuery(String sql) throws SQLException {
// row data
String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql";
String timestampFormat = conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT);
if ("TIMESTAMP".equalsIgnoreCase(timestampFormat))
url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlt";
if ("UTC".equalsIgnoreCase(timestampFormat))
url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlutc";
String result = HttpClientPoolUtil.execute(url, sql, this.conn.getToken());
String result = HttpClientPoolUtil.execute(getUrl(), sql, this.conn.getToken());
JSONObject resultJson = JSON.parseObject(result);
if (resultJson.getString("status").equals("error")) {
throw TSDBError.createSQLException(resultJson.getInteger("code"), resultJson.getString("desc"));
......@@ -119,11 +115,8 @@ public class RestfulStatement extends AbstractStatement {
return resultSet;
}
private int executeOneUpdate(String url, String sql) throws SQLException {
if (!SqlSyntaxValidator.isValidForExecuteUpdate(sql))
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_UPDATE, "not a valid sql for executeUpdate: " + sql);
String result = HttpClientPoolUtil.execute(url, sql, this.conn.getToken());
private int executeOneUpdate(String sql) throws SQLException {
String result = HttpClientPoolUtil.execute(getUrl(), sql, this.conn.getToken());
JSONObject jsonObject = JSON.parseObject(result);
if (jsonObject.getString("status").equals("error")) {
throw TSDBError.createSQLException(jsonObject.getInteger("code"), jsonObject.getString("desc"));
......@@ -134,7 +127,7 @@ public class RestfulStatement extends AbstractStatement {
}
private int getAffectedRows(JSONObject jsonObject) throws SQLException {
// create ... SQLs should return 0 , and Restful result is this:
// create ... SQLs should return 0 , and Restful result like this:
// {"status": "succ", "head": ["affected_rows"], "data": [[0]], "rows": 1}
JSONArray head = jsonObject.getJSONArray("head");
if (head.size() != 1 || !"affected_rows".equals(head.getString(0)))
......
......@@ -16,8 +16,7 @@ package com.taosdata.jdbc.utils;
public class SqlSyntaxValidator {
private static final String[] SQL = {"select", "insert", "import", "create", "use", "alter", "drop", "set", "show", "describe", "reset"};
private static final String[] updateSQL = {"insert", "import", "create", "use", "alter", "drop", "set"};
private static final String[] updateSQL = {"insert", "import", "create", "use", "alter", "drop", "set", "reset"};
private static final String[] querySQL = {"select", "show", "describe"};
private static final String[] databaseUnspecifiedShow = {"databases", "dnodes", "mnodes", "variables"};
......@@ -38,14 +37,6 @@ public class SqlSyntaxValidator {
return false;
}
public static boolean isValidForExecute(String sql) {
for (String prefix : SQL) {
if (sql.trim().toLowerCase().startsWith(prefix))
return true;
}
return false;
}
public static boolean isDatabaseUnspecifiedQuery(String sql) {
for (String databaseObj : databaseUnspecifiedShow) {
if (sql.trim().toLowerCase().matches("show\\s+" + databaseObj + ".*"))
......@@ -63,9 +54,5 @@ public class SqlSyntaxValidator {
return sql.trim().toLowerCase().startsWith("use");
}
public static boolean isSelectSql(String sql) {
return sql.trim().toLowerCase().startsWith("select");
}
}
......@@ -69,6 +69,8 @@ public class SubscribeTest {
@Before
public void createDatabase() throws SQLException {
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
properties.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
......
package com.taosdata.jdbc.cases;
import com.taosdata.jdbc.TSDBErrorNumbers;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
import java.sql.*;
......@@ -12,6 +15,47 @@ public class AuthenticationTest {
private static final String password = "taos?data";
private Connection conn;
@Test
public void connectWithoutUserByJni() {
try {
DriverManager.getConnection("jdbc:TAOS://" + host + ":0/?");
} catch (SQLException e) {
Assert.assertEquals(TSDBErrorNumbers.ERROR_USER_IS_REQUIRED, e.getErrorCode());
Assert.assertEquals("ERROR (2319): user is required", e.getMessage());
}
}
@Test
public void connectWithoutUserByRestful() {
try {
DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?");
} catch (SQLException e) {
Assert.assertEquals(TSDBErrorNumbers.ERROR_USER_IS_REQUIRED, e.getErrorCode());
Assert.assertEquals("ERROR (2319): user is required", e.getMessage());
}
}
@Test
public void connectWithoutPasswordByJni() {
try {
DriverManager.getConnection("jdbc:TAOS://" + host + ":0/?user=root");
} catch (SQLException e) {
Assert.assertEquals(TSDBErrorNumbers.ERROR_PASSWORD_IS_REQUIRED, e.getErrorCode());
Assert.assertEquals("ERROR (231a): password is required", e.getMessage());
}
}
@Test
public void connectWithoutPasswordByRestful() {
try {
DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=root");
} catch (SQLException e) {
Assert.assertEquals(TSDBErrorNumbers.ERROR_PASSWORD_IS_REQUIRED, e.getErrorCode());
Assert.assertEquals("ERROR (231a): password is required", e.getMessage());
}
}
@Ignore
@Test
public void test() {
// change password
......
......@@ -29,6 +29,8 @@ public class BatchInsertTest {
public void before() {
try {
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
properties.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
......
......@@ -21,6 +21,8 @@ public class ImportTest {
public static void before() {
try {
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
properties.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
......
......@@ -270,6 +270,41 @@ public class InsertSpecialCharacterJniTest {
}
}
@Ignore
@Test
public void testSingleQuotaEscape() throws SQLException {
final long now = System.currentTimeMillis();
final String sql = "insert into t? using ? tags(?) values(?, ?, ?) t? using " + tbname2 + " tags(?) values(?,?,?) ";
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
// t1
pstmt.setInt(1, 1);
pstmt.setString(2, tbname2);
pstmt.setString(3, special_character_str_5);
pstmt.setTimestamp(4, new Timestamp(now));
pstmt.setBytes(5, special_character_str_5.getBytes());
// t2
pstmt.setInt(7, 2);
pstmt.setString(8, special_character_str_5);
pstmt.setTimestamp(9, new Timestamp(now));
pstmt.setString(11, special_character_str_5);
int ret = pstmt.executeUpdate();
Assert.assertEquals(2, ret);
}
String query = "select * from ?.t? where ? < ? and ts >= ? and f1 is not null";
try (PreparedStatement pstmt = conn.prepareStatement(query)) {
pstmt.setString(1, dbName);
pstmt.setInt(2, 1);
pstmt.setString(3, "ts");
pstmt.setTimestamp(4, new Timestamp(System.currentTimeMillis()));
pstmt.setTimestamp(5, new Timestamp(0));
ResultSet rs = pstmt.executeQuery();
Assert.assertNotNull(rs);
}
}
@Test
public void testCase10() throws SQLException {
final long now = System.currentTimeMillis();
......@@ -293,13 +328,12 @@ public class InsertSpecialCharacterJniTest {
Assert.assertEquals(2, ret);
}
//query t1
String query = "select * from ?.t? where ts < ? and ts >= ? and ? is not null";
String query = "select * from ?.t? where ts < ? and ts >= ? and f1 is not null";
try (PreparedStatement pstmt = conn.prepareStatement(query)) {
pstmt.setString(1, dbName);
pstmt.setInt(2, 1);
pstmt.setTimestamp(3, new Timestamp(System.currentTimeMillis()));
pstmt.setTimestamp(4, new Timestamp(0));
pstmt.setString(5, "f1");
ResultSet rs = pstmt.executeQuery();
rs.next();
......@@ -311,12 +345,11 @@ public class InsertSpecialCharacterJniTest {
Assert.assertNull(f2);
}
// query t2
query = "select * from t? where ts < ? and ts >= ? and ? is not null";
query = "select * from t? where ts < ? and ts >= ? and f2 is not null";
try (PreparedStatement pstmt = conn.prepareStatement(query)) {
pstmt.setInt(1, 2);
pstmt.setTimestamp(2, new Timestamp(System.currentTimeMillis()));
pstmt.setTimestamp(3, new Timestamp(0));
pstmt.setString(4, "f2");
ResultSet rs = pstmt.executeQuery();
rs.next();
......
......@@ -293,13 +293,12 @@ public class InsertSpecialCharacterRestfulTest {
Assert.assertEquals(2, ret);
}
//query t1
String query = "select * from ?.t? where ts < ? and ts >= ? and ? is not null";
String query = "select * from ?.t? where ts < ? and ts >= ? and f1 is not null";
try (PreparedStatement pstmt = conn.prepareStatement(query)) {
pstmt.setString(1, dbName);
pstmt.setInt(2, 1);
pstmt.setTimestamp(3, new Timestamp(System.currentTimeMillis()));
pstmt.setTimestamp(4, new Timestamp(0));
pstmt.setString(5, "f1");
ResultSet rs = pstmt.executeQuery();
rs.next();
......@@ -311,12 +310,11 @@ public class InsertSpecialCharacterRestfulTest {
Assert.assertNull(f2);
}
// query t2
query = "select * from t? where ts < ? and ts >= ? and ? is not null";
query = "select * from t? where ts < ? and ts >= ? and f2 is not null";
try (PreparedStatement pstmt = conn.prepareStatement(query)) {
pstmt.setInt(1, 2);
pstmt.setTimestamp(2, new Timestamp(System.currentTimeMillis()));
pstmt.setTimestamp(3, new Timestamp(0));
pstmt.setString(4, "f2");
ResultSet rs = pstmt.executeQuery();
rs.next();
......
......@@ -22,6 +22,8 @@ public class QueryDataTest {
public void createDatabase() {
try {
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
properties.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
......
package com.taosdata.jdbc.cases;
import com.taosdata.jdbc.TSDBDriver;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.sql.*;
import java.util.Properties;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
public class ResetQueryCacheTest {
static Connection connection;
static Statement statement;
static String host = "127.0.0.1";
@Before
public void init() {
try {
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
statement = connection.createStatement();
} catch (SQLException e) {
return;
}
@Test
public void jni() throws SQLException {
// given
Connection connection = DriverManager.getConnection("jdbc:TAOS://127.0.0.1:0/?user=root&password=taosdata&timezone=UTC-8&charset=UTF-8&locale=en_US.UTF-8");
Statement statement = connection.createStatement();
// when
boolean execute = statement.execute("reset query cache");
// then
assertFalse(execute);
assertEquals(0, statement.getUpdateCount());
statement.close();
connection.close();
}
@Test
public void testResetQueryCache() throws SQLException {
String resetSql = "reset query cache";
statement.execute(resetSql);
}
public void restful() throws SQLException {
// given
Connection connection = DriverManager.getConnection("jdbc:TAOS-RS://127.0.0.1:6041/?user=root&password=taosdata&timezone=UTC-8&charset=UTF-8&locale=en_US.UTF-8");
Statement statement = connection.createStatement();
// when
boolean execute = statement.execute("reset query cache");
// then
assertFalse(execute);
assertEquals(0, statement.getUpdateCount());
@After
public void close() {
try {
if (statement != null)
statement.close();
if (connection != null)
connection.close();
} catch (SQLException e) {
e.printStackTrace();
}
statement.close();
connection.close();
}
}
\ No newline at end of file
......@@ -20,6 +20,8 @@ public class SelectTest {
public void createDatabaseAndTable() {
try {
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
properties.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
......
......@@ -24,6 +24,8 @@ public class StableTest {
public static void createDatabase() {
try {
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
properties.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
......
package com.taosdata.jdbc.utils;
import org.junit.Assert;
import org.junit.Test;
public class SqlSyntaxValidatorTest {
@Test
public void isSelectSQL() {
Assert.assertTrue(SqlSyntaxValidator.isSelectSql("select * from test.weather"));
Assert.assertTrue(SqlSyntaxValidator.isSelectSql(" select * from test.weather"));
Assert.assertTrue(SqlSyntaxValidator.isSelectSql(" select * from test.weather "));
Assert.assertFalse(SqlSyntaxValidator.isSelectSql("insert into test.weather values(now, 1.1, 2)"));
}
@Test
public void isUseSQL() {
Assert.assertTrue(SqlSyntaxValidator.isUseSql("use database test"));
}
}
\ No newline at end of file
......@@ -5983,7 +5983,7 @@ static int32_t prepareStbStmtBind(
int64_t startTime, int32_t recSeq,
bool isColumn)
{
char *bindBuffer = calloc(1, g_args.len_of_binary);
char *bindBuffer = calloc(1, DOUBLE_BUFF_LEN); // g_args.len_of_binary);
if (bindBuffer == NULL) {
errorPrint("%s() LN%d, Failed to allocate %d bind buffer\n",
__func__, __LINE__, g_args.len_of_binary);
......@@ -6319,7 +6319,7 @@ static void printStatPerThread(threadInfo *pThreadInfo)
pThreadInfo->totalInsertRows,
pThreadInfo->totalAffectedRows,
(pThreadInfo->totalDelay)?
(double)(pThreadInfo->totalAffectedRows/(pThreadInfo->totalDelay/1000000.0)):
(double)(pThreadInfo->totalAffectedRows/((double)pThreadInfo->totalDelay/1000000.0)):
FLT_MAX);
}
......@@ -6539,7 +6539,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
verbosePrint("[%d] %s() LN%d, buffer=%s\n",
pThreadInfo->threadID, __func__, __LINE__, pThreadInfo->buffer);
startTs = taosGetTimestampMs();
startTs = taosGetTimestampUs();
if (recOfBatch == 0) {
errorPrint("[%d] %s() LN%d Failed to insert records of batch %d\n",
......@@ -6555,10 +6555,10 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
}
int64_t affectedRows = execInsert(pThreadInfo, recOfBatch);
endTs = taosGetTimestampMs();
endTs = taosGetTimestampUs();
uint64_t delay = endTs - startTs;
performancePrint("%s() LN%d, insert execution time is %"PRIu64"ms\n",
__func__, __LINE__, delay);
performancePrint("%s() LN%d, insert execution time is %10.2f ms\n",
__func__, __LINE__, delay / 1000.0);
verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n",
pThreadInfo->threadID,
__func__, __LINE__, affectedRows);
......@@ -6721,8 +6721,8 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
endTs = taosGetTimestampUs();
uint64_t delay = endTs - startTs;
performancePrint("%s() LN%d, insert execution time is %"PRId64"ms\n",
__func__, __LINE__, delay);
performancePrint("%s() LN%d, insert execution time is %10.f ms\n",
__func__, __LINE__, delay/1000.0);
verbosePrint("[%d] %s() LN%d affectedRows=%d\n",
pThreadInfo->threadID,
__func__, __LINE__, affectedRows);
......
......@@ -101,13 +101,17 @@ char *httpGetStatusDesc(int32_t statusCode) {
}
static void httpCleanupString(HttpString *str) {
free(str->str);
str->str = NULL;
str->pos = 0;
str->size = 0;
if (str->str) {
free(str->str);
str->str = NULL;
str->pos = 0;
str->size = 0;
}
}
static int32_t httpAppendString(HttpString *str, const char *s, int32_t len) {
char *new_str = NULL;
if (str->size == 0) {
str->pos = 0;
str->size = len + 1;
......@@ -115,7 +119,16 @@ static int32_t httpAppendString(HttpString *str, const char *s, int32_t len) {
} else if (str->pos + len + 1 >= str->size) {
str->size += len;
str->size *= 4;
str->str = realloc(str->str, str->size);
new_str = realloc(str->str, str->size);
if (new_str == NULL && str->str) {
// if str->str was not NULL originally,
// the old allocated memory was left unchanged,
// see man 3 realloc
free(str->str);
}
str->str = new_str;
} else {
}
......@@ -317,7 +330,7 @@ static int32_t httpOnParseHeaderField(HttpParser *parser, const char *key, const
static int32_t httpOnBody(HttpParser *parser, const char *chunk, int32_t len) {
HttpContext *pContext = parser->pContext;
HttpString * buf = &parser->body;
HttpString *buf = &parser->body;
if (parser->parseCode != TSDB_CODE_SUCCESS) return -1;
if (buf->size <= 0) {
......@@ -326,6 +339,7 @@ static int32_t httpOnBody(HttpParser *parser, const char *chunk, int32_t len) {
}
int32_t newSize = buf->pos + len + 1;
char *newStr = NULL;
if (newSize >= buf->size) {
if (buf->size >= HTTP_BUFFER_SIZE) {
httpError("context:%p, fd:%d, failed parse body, exceeding buffer size %d", pContext, pContext->fd, buf->size);
......@@ -336,7 +350,12 @@ static int32_t httpOnBody(HttpParser *parser, const char *chunk, int32_t len) {
newSize = MAX(newSize, HTTP_BUFFER_INIT);
newSize *= 4;
newSize = MIN(newSize, HTTP_BUFFER_SIZE);
buf->str = realloc(buf->str, newSize);
newStr = realloc(buf->str, newSize);
if (newStr == NULL && buf->str) {
free(buf->str);
}
buf->str = newStr;
buf->size = newSize;
if (buf->str == NULL) {
......@@ -374,13 +393,20 @@ static HTTP_PARSER_STATE httpTopStack(HttpParser *parser) {
static int32_t httpPushStack(HttpParser *parser, HTTP_PARSER_STATE state) {
HttpStack *stack = &parser->stacks;
int8_t *newStacks = NULL;
if (stack->size == 0) {
stack->pos = 0;
stack->size = 32;
stack->stacks = malloc(stack->size * sizeof(int8_t));
} else if (stack->pos + 1 > stack->size) {
stack->size *= 2;
stack->stacks = realloc(stack->stacks, stack->size * sizeof(int8_t));
newStacks = realloc(stack->stacks, stack->size * sizeof(int8_t));
if (newStacks == NULL && stack->stacks) {
free(stack->stacks);
}
stack->stacks = newStacks;
} else {
}
......
......@@ -188,13 +188,17 @@ bool httpMallocMultiCmds(HttpContext *pContext, int32_t cmdSize, int32_t bufferS
bool httpReMallocMultiCmdsSize(HttpContext *pContext, int32_t cmdSize) {
HttpSqlCmds *multiCmds = pContext->multiCmds;
if (cmdSize > HTTP_MAX_CMD_SIZE) {
if (cmdSize <= 0 || cmdSize > HTTP_MAX_CMD_SIZE) {
httpError("context:%p, fd:%d, user:%s, mulitcmd size:%d large then %d", pContext, pContext->fd, pContext->user,
cmdSize, HTTP_MAX_CMD_SIZE);
return false;
}
multiCmds->cmds = (HttpSqlCmd *)realloc(multiCmds->cmds, (size_t)cmdSize * sizeof(HttpSqlCmd));
HttpSqlCmd *new_cmds = (HttpSqlCmd *)realloc(multiCmds->cmds, (size_t)cmdSize * sizeof(HttpSqlCmd));
if (new_cmds == NULL && multiCmds->cmds) {
free(multiCmds->cmds);
}
multiCmds->cmds = new_cmds;
if (multiCmds->cmds == NULL) {
httpError("context:%p, fd:%d, user:%s, malloc cmds:%d error", pContext, pContext->fd, pContext->user, cmdSize);
return false;
......@@ -208,13 +212,17 @@ bool httpReMallocMultiCmdsSize(HttpContext *pContext, int32_t cmdSize) {
bool httpReMallocMultiCmdsBuffer(HttpContext *pContext, int32_t bufferSize) {
HttpSqlCmds *multiCmds = pContext->multiCmds;
if (bufferSize > HTTP_MAX_BUFFER_SIZE) {
if (bufferSize <= 0 || bufferSize > HTTP_MAX_BUFFER_SIZE) {
httpError("context:%p, fd:%d, user:%s, mulitcmd buffer size:%d large then %d", pContext, pContext->fd,
pContext->user, bufferSize, HTTP_MAX_BUFFER_SIZE);
return false;
}
multiCmds->buffer = (char *)realloc(multiCmds->buffer, (size_t)bufferSize);
char *new_buffer = (char *)realloc(multiCmds->buffer, (size_t)bufferSize);
if (new_buffer == NULL && multiCmds->buffer) {
free(multiCmds->buffer);
}
multiCmds->buffer = new_buffer;
if (multiCmds->buffer == NULL) {
httpError("context:%p, fd:%d, user:%s, malloc buffer:%d error", pContext, pContext->fd, pContext->user, bufferSize);
return false;
......
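The httpParser.c and httpUtil.c hunks above all apply the same defensive realloc pattern; a minimal standalone sketch of that pattern (the function name is illustrative, not part of the patch):
#include <stdlib.h>

/* realloc(3) leaves the original block untouched on failure, so it must be
 * freed before the pointer is overwritten, as the patch now does for
 * str->str, buf->str, stack->stacks, multiCmds->cmds and multiCmds->buffer. */
static void *growOrRelease(void *old, size_t newSize) {
    void *p = realloc(old, newSize);
    if (p == NULL && old != NULL) {
        free(old);
    }
    return p; /* the caller must still check for NULL */
}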
......@@ -206,6 +206,12 @@ static int32_t fillResultImpl(SFillInfo* pFillInfo, void** data, int32_t outputR
} else {
assert(pFillInfo->currentKey == ts);
initBeforeAfterDataBuf(pFillInfo, prev);
if (pFillInfo->type == TSDB_FILL_NEXT && (pFillInfo->index + 1) < pFillInfo->numOfRows) {
initBeforeAfterDataBuf(pFillInfo, next);
++pFillInfo->index;
copyCurrentRowIntoBuf(pFillInfo, srcData, *next);
--pFillInfo->index;
}
// assign rows to dst buffer
for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) {
......@@ -227,6 +233,12 @@ static int32_t fillResultImpl(SFillInfo* pFillInfo, void** data, int32_t outputR
} else if (pFillInfo->type == TSDB_FILL_LINEAR) {
assignVal(output, src, pCol->col.bytes, pCol->col.type);
memcpy(*prev + pCol->col.offset, src, pCol->col.bytes);
} else if (pFillInfo->type == TSDB_FILL_NEXT) {
if (*next) {
assignVal(output, *next + pCol->col.offset, pCol->col.bytes, pCol->col.type);
} else {
setNull(output, pCol->col.type, pCol->col.bytes);
}
} else {
assignVal(output, (char*)&pCol->fillVal.i, pCol->col.bytes, pCol->col.type);
}
......
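The two hunks above add FILL(NEXT) handling: when an expected timestamp has no stored row, the value of the next stored row is borrowed, and NULL is emitted when no later row exists. A standalone sketch of that semantics over a plain array (illustrative only, not the query-engine code):

#include <stdio.h>

/* Walk the expected timeline; real rows are printed as stored, gaps borrow
 * the next stored row, and gaps past the last row become NULL (-1 here). */
static void fillNext(const long *ts, const int *val, int nRows,
                     long start, long stop, long step) {
  int idx = 0;
  for (long t = start; t <= stop; t += step) {
    while (idx < nRows && ts[idx] < t) idx++;        /* skip rows before t */
    if (idx < nRows && ts[idx] == t) {
      printf("%ld -> %d (stored)\n", t, val[idx]);
    } else if (idx < nRows) {
      printf("%ld -> %d (next)\n", t, val[idx]);     /* borrow next row    */
    } else {
      printf("%ld -> NULL\n", t);                    /* nothing follows    */
    }
  }
}

int main(void) {
  long ts[]  = {1000, 4000};
  int  val[] = {10, 40};
  fillNext(ts, val, 2, 1000, 5000, 1000);
  return 0;
}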
......@@ -397,7 +397,11 @@ void *taosOpenTcpClientConnection(void *shandle, void *thandle, uint32_t ip, uin
SThreadObj *pThreadObj = pClientObj->pThreadObj[index];
SOCKET fd = taosOpenTcpClientSocket(ip, port, pThreadObj->ip);
#if defined(_TD_WINDOWS_64) || defined(_TD_WINDOWS_32)
if (fd == (SOCKET)-1) return NULL;
#else
if (fd <= 0) return NULL;
#endif
struct sockaddr_in sin;
uint16_t localPort = 0;
......
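The hunk above splits the invalid-socket check per platform: on Windows the winsock SOCKET type is unsigned and failure is reported as (SOCKET)-1, so a "fd <= 0" test would not catch it, while on POSIX a return of -1 (or 0, which is never expected here) indicates failure. A compilable illustration under that assumption, using a stand-in SOCKET typedef rather than the real headers:

#include <stdio.h>

#if defined(_TD_WINDOWS_64) || defined(_TD_WINDOWS_32)
typedef unsigned long long SOCKET;                     /* stand-in for winsock */
static int fdIsInvalid(SOCKET fd) { return fd == (SOCKET)-1; }  /* unsigned: "<= 0" would miss -1 */
#else
typedef int SOCKET;                                    /* POSIX descriptors are ints */
static int fdIsInvalid(SOCKET fd) { return fd <= 0; }  /* -1 on failure, 0 never expected here */
#endif

int main(void) {
  SOCKET bad = (SOCKET)-1;
  printf("invalid? %d\n", fdIsInvalid(bad));           /* prints 1 on both platforms */
  return 0;
}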
......@@ -24,8 +24,7 @@ typedef struct STable {
tstr* name; // NOTE: there a flexible string here
uint64_t suid;
struct STable* pSuper; // super table pointer
uint8_t numOfSchemas;
STSchema* schema[TSDB_MAX_TABLE_SCHEMAS];
SArray* schema;
STSchema* tagSchema;
SKVRow tagVal;
SSkipList* pIndex; // For TSDB_SUPER_TABLE, it is the skiplist index
......@@ -107,10 +106,9 @@ static FORCE_INLINE STSchema* tsdbGetTableSchemaImpl(STable* pTable, bool lock,
if (lock) TSDB_RLOCK_TABLE(pDTable);
if (_version < 0) { // get the latest version of schema
pTSchema = pDTable->schema[pDTable->numOfSchemas - 1];
pTSchema = *(STSchema **)taosArrayGetLast(pDTable->schema);
} else { // get the schema with version
void* ptr = taosbsearch(&_version, pDTable->schema, pDTable->numOfSchemas, sizeof(STSchema*),
tsdbCompareSchemaVersion, TD_EQ);
void* ptr = taosArraySearch(pDTable->schema, &_version, tsdbCompareSchemaVersion, TD_EQ);
if (ptr == NULL) {
terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION;
goto _exit;
......
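The hunk above keeps table schemas in a growable array sorted by version: the latest schema is simply the last element, and a specific version is found by binary search. A self-contained sketch of that lookup over a plain array, with illustrative names rather than the taosArray API:

#include <stdio.h>

typedef struct { int version; } Schema;

/* Latest version: the array is kept sorted ascending, so take the tail. */
static const Schema *latestSchema(const Schema *arr, int n) {
  return n > 0 ? &arr[n - 1] : NULL;
}

/* Specific version: classic binary search on the sorted version field. */
static const Schema *findSchema(const Schema *arr, int n, int version) {
  int lo = 0, hi = n - 1;
  while (lo <= hi) {
    int mid = lo + (hi - lo) / 2;
    if (arr[mid].version == version) return &arr[mid];
    if (arr[mid].version < version) lo = mid + 1; else hi = mid - 1;
  }
  return NULL;                          /* version no longer kept */
}

int main(void) {
  Schema s[] = {{1}, {3}, {7}};
  printf("latest=%d\n", latestSchema(s, 3)->version);            /* 7   */
  printf("found v3? %s\n", findSchema(s, 3, 3) ? "yes" : "no");  /* yes */
  return 0;
}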
......@@ -722,7 +722,7 @@ static int tsdbInitCommitH(SCommitH *pCommith, STsdbRepo *pRepo) {
return -1;
}
pCommith->pDataCols = tdNewDataCols(0, 0, pCfg->maxRowsPerFileBlock);
pCommith->pDataCols = tdNewDataCols(0, pCfg->maxRowsPerFileBlock);
if (pCommith->pDataCols == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
tsdbDestroyCommitH(pCommith);
......@@ -920,7 +920,6 @@ int tsdbWriteBlockImpl(STsdbRepo *pRepo, STable *pTable, SDFile *pDFile, SDataCo
SDataCol * pDataCol = pDataCols->cols + ncol;
SBlockCol *pBlockCol = pBlockData->cols + nColsNotAllNull;
// if (isNEleNull(pDataCol, rowsToWrite)) { // all data to commit are NULL, just ignore it
if (isAllRowsNull(pDataCol)) { // all data to commit are NULL, just ignore it
continue;
}
......@@ -1277,6 +1276,7 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt
if (key1 < key2) {
for (int i = 0; i < pDataCols->numOfCols; i++) {
//TODO: dataColAppendVal may fail
dataColAppendVal(pTarget->cols + i, tdGetColDataOfRow(pDataCols->cols + i, *iter), pTarget->numOfRows,
pTarget->maxPoints);
}
......@@ -1308,6 +1308,7 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt
ASSERT(!isRowDel);
for (int i = 0; i < pDataCols->numOfCols; i++) {
//TODO: dataColAppendVal may fail
dataColAppendVal(pTarget->cols + i, tdGetColDataOfRow(pDataCols->cols + i, *iter), pTarget->numOfRows,
pTarget->maxPoints);
}
......
......@@ -296,7 +296,7 @@ static int tsdbCompactMeta(STsdbRepo *pRepo) {
return -1;
}
pComph->pDataCols = tdNewDataCols(0, 0, pCfg->maxRowsPerFileBlock);
pComph->pDataCols = tdNewDataCols(0, pCfg->maxRowsPerFileBlock);
if (pComph->pDataCols == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
tsdbDestroyCompactH(pComph);
......
......@@ -702,11 +702,12 @@ static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg) {
}
//row1 has higher priority
static SMemRow tsdbInsertDupKeyMerge(SMemRow row1, SMemRow row2, STsdbRepo* pRepo, STSchema **ppSchema1, STSchema **ppSchema2, STable* pTable, int32_t* pAffectedRows, int64_t* pPoints, SMemRow* pLastRow) {
static SMemRow tsdbInsertDupKeyMerge(SMemRow row1, SMemRow row2, STsdbRepo* pRepo,
STSchema **ppSchema1, STSchema **ppSchema2,
STable* pTable, int32_t* pPoints, SMemRow* pLastRow) {
//for compatibility, a duplicate key inserted when update=0 should also be counted as an affected row!
if(row1 == NULL && row2 == NULL && pRepo->config.update == TD_ROW_DISCARD_UPDATE) {
(*pAffectedRows)++;
(*pPoints)++;
return NULL;
}
......@@ -715,7 +716,6 @@ static SMemRow tsdbInsertDupKeyMerge(SMemRow row1, SMemRow row2, STsdbRepo* pRep
void* pMem = tsdbAllocBytes(pRepo, memRowTLen(row1));
if(pMem == NULL) return NULL;
memRowCpy(pMem, row1);
(*pAffectedRows)++;
(*pPoints)++;
*pLastRow = pMem;
return pMem;
......@@ -750,18 +750,16 @@ static SMemRow tsdbInsertDupKeyMerge(SMemRow row1, SMemRow row2, STsdbRepo* pRep
if(pMem == NULL) return NULL;
memRowCpy(pMem, tmp);
(*pAffectedRows)++;
(*pPoints)++;
*pLastRow = pMem;
return pMem;
}
static void* tsdbInsertDupKeyMergePacked(void** args) {
return tsdbInsertDupKeyMerge(args[0], args[1], args[2], (STSchema**)&args[3], (STSchema**)&args[4], args[5], args[6], args[7], args[8]);
return tsdbInsertDupKeyMerge(args[0], args[1], args[2], (STSchema**)&args[3], (STSchema**)&args[4], args[5], args[6], args[7]);
}
static void tsdbSetupSkipListHookFns(SSkipList* pSkipList, STsdbRepo *pRepo, STable *pTable, int32_t* pAffectedRows, int64_t* pPoints, SMemRow* pLastRow) {
static void tsdbSetupSkipListHookFns(SSkipList* pSkipList, STsdbRepo *pRepo, STable *pTable, int32_t* pPoints, SMemRow* pLastRow) {
if(pSkipList->insertHandleFn == NULL) {
tGenericSavedFunc *dupHandleSavedFunc = genericSavedFuncInit((GenericVaFunc)&tsdbInsertDupKeyMergePacked, 9);
......@@ -769,17 +767,16 @@ static void tsdbSetupSkipListHookFns(SSkipList* pSkipList, STsdbRepo *pRepo, STa
dupHandleSavedFunc->args[3] = NULL;
dupHandleSavedFunc->args[4] = NULL;
dupHandleSavedFunc->args[5] = pTable;
dupHandleSavedFunc->args[6] = pAffectedRows;
dupHandleSavedFunc->args[7] = pPoints;
dupHandleSavedFunc->args[8] = pLastRow;
pSkipList->insertHandleFn = dupHandleSavedFunc;
}
pSkipList->insertHandleFn->args[6] = pPoints;
pSkipList->insertHandleFn->args[7] = pLastRow;
}
static int tsdbInsertDataToTable(STsdbRepo* pRepo, SSubmitBlk* pBlock, int32_t *pAffectedRows) {
STsdbMeta *pMeta = pRepo->tsdbMeta;
int64_t points = 0;
int32_t points = 0;
STable *pTable = NULL;
SSubmitBlkIter blkIter = {0};
SMemTable *pMemTable = NULL;
......@@ -830,9 +827,10 @@ static int tsdbInsertDataToTable(STsdbRepo* pRepo, SSubmitBlk* pBlock, int32_t *
SMemRow lastRow = NULL;
int64_t osize = SL_SIZE(pTableData->pData);
tsdbSetupSkipListHookFns(pTableData->pData, pRepo, pTable, pAffectedRows, &points, &lastRow);
tsdbSetupSkipListHookFns(pTableData->pData, pRepo, pTable, &points, &lastRow);
tSkipListPutBatchByIter(pTableData->pData, &blkIter, (iter_next_fn_t)tsdbGetSubmitBlkNext);
int64_t dsize = SL_SIZE(pTableData->pData) - osize;
(*pAffectedRows) += points;
if(lastRow != NULL) {
......
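The hunks above drop the pAffectedRows pointer from the duplicate-key merge path: the skip-list hook now only bumps a local points counter, and the caller adds that count to *pAffectedRows once after the whole batch is inserted. A minimal sketch of this accumulate-then-fold pattern, with illustrative names rather than the tsdb API:

#include <stdint.h>
#include <stdio.h>

/* Per-row hook sees only the local counter. */
static void onRowInserted(int32_t *points) { (*points)++; }

/* Caller owns the shared total and folds the batch count in once. */
static void insertBatch(const int *rows, int nRows, int32_t *pAffectedRows) {
  int32_t points = 0;                       /* local, per-batch counter  */
  for (int i = 0; i < nRows; i++) {
    (void)rows[i];                          /* pretend to insert the row */
    onRowInserted(&points);
  }
  *pAffectedRows += points;                 /* fold in once, at the end  */
}

int main(void) {
  int rows[] = {1, 2, 3};
  int32_t affected = 0;
  insertBatch(rows, 3, &affected);
  printf("affected rows: %d\n", (int)affected);  /* 3 */
  return 0;
}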
......@@ -17,7 +17,6 @@
#define TSDB_SUPER_TABLE_SL_LEVEL 5
#define DEFAULT_TAG_INDEX_COLUMN 0
static int tsdbCompareSchemaVersion(const void *key1, const void *key2);
static char * getTagIndexKey(const void *pData);
static STable *tsdbNewTable();
static STable *tsdbCreateTableFromCfg(STableCfg *pCfg, bool isSuper, STable *pSTable);
......@@ -44,6 +43,8 @@ static int tsdbRemoveTableFromStore(STsdbRepo *pRepo, STable *pTable);
static int tsdbRmTableFromMeta(STsdbRepo *pRepo, STable *pTable);
static int tsdbAdjustMetaTables(STsdbRepo *pRepo, int tid);
static int tsdbCheckTableTagVal(SKVRow *pKVRow, STSchema *pSchema);
static int tsdbAddSchema(STable *pTable, STSchema *pSchema);
static void tsdbFreeTableSchema(STable *pTable);
// ------------------ OUTER FUNCTIONS ------------------
int tsdbCreateTable(STsdbRepo *repo, STableCfg *pCfg) {
......@@ -723,17 +724,10 @@ void tsdbUpdateTableSchema(STsdbRepo *pRepo, STable *pTable, STSchema *pSchema,
STsdbMeta *pMeta = pRepo->tsdbMeta;
STable *pCTable = (TABLE_TYPE(pTable) == TSDB_CHILD_TABLE) ? pTable->pSuper : pTable;
ASSERT(schemaVersion(pSchema) > schemaVersion(pCTable->schema[pCTable->numOfSchemas - 1]));
ASSERT(schemaVersion(pSchema) > schemaVersion(*(STSchema **)taosArrayGetLast(pCTable->schema)));
TSDB_WLOCK_TABLE(pCTable);
if (pCTable->numOfSchemas < TSDB_MAX_TABLE_SCHEMAS) {
pCTable->schema[pCTable->numOfSchemas++] = pSchema;
} else {
ASSERT(pCTable->numOfSchemas == TSDB_MAX_TABLE_SCHEMAS);
tdFreeSchema(pCTable->schema[0]);
memmove(pCTable->schema, pCTable->schema + 1, sizeof(STSchema *) * (TSDB_MAX_TABLE_SCHEMAS - 1));
pCTable->schema[pCTable->numOfSchemas - 1] = pSchema;
}
tsdbAddSchema(pCTable, pSchema);
if (schemaNCols(pSchema) > pMeta->maxCols) pMeta->maxCols = schemaNCols(pSchema);
if (schemaTLen(pSchema) > pMeta->maxRowBytes) pMeta->maxRowBytes = schemaTLen(pSchema);
......@@ -829,9 +823,7 @@ static STable *tsdbCreateTableFromCfg(STableCfg *pCfg, bool isSuper, STable *pST
TABLE_TID(pTable) = -1;
TABLE_SUID(pTable) = -1;
pTable->pSuper = NULL;
pTable->numOfSchemas = 1;
pTable->schema[0] = tdDupSchema(pCfg->schema);
if (pTable->schema[0] == NULL) {
if (tsdbAddSchema(pTable, tdDupSchema(pCfg->schema)) < 0) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
goto _err;
}
......@@ -842,7 +834,8 @@ static STable *tsdbCreateTableFromCfg(STableCfg *pCfg, bool isSuper, STable *pST
}
pTable->tagVal = NULL;
STColumn *pCol = schemaColAt(pTable->tagSchema, DEFAULT_TAG_INDEX_COLUMN);
pTable->pIndex = tSkipListCreate(TSDB_SUPER_TABLE_SL_LEVEL, colType(pCol), (uint8_t)(colBytes(pCol)), NULL, SL_ALLOW_DUP_KEY, getTagIndexKey);
pTable->pIndex = tSkipListCreate(TSDB_SUPER_TABLE_SL_LEVEL, colType(pCol), (uint8_t)(colBytes(pCol)), NULL,
SL_ALLOW_DUP_KEY, getTagIndexKey);
if (pTable->pIndex == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
goto _err;
......@@ -871,9 +864,7 @@ static STable *tsdbCreateTableFromCfg(STableCfg *pCfg, bool isSuper, STable *pST
}
} else {
TABLE_SUID(pTable) = -1;
pTable->numOfSchemas = 1;
pTable->schema[0] = tdDupSchema(pCfg->schema);
if (pTable->schema[0] == NULL) {
if (tsdbAddSchema(pTable, tdDupSchema(pCfg->schema)) < 0) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
goto _err;
}
......@@ -907,9 +898,7 @@ static void tsdbFreeTable(STable *pTable) {
TABLE_UID(pTable));
tfree(TABLE_NAME(pTable));
if (TABLE_TYPE(pTable) != TSDB_CHILD_TABLE) {
for (int i = 0; i < TSDB_MAX_TABLE_SCHEMAS; i++) {
tdFreeSchema(pTable->schema[i]);
}
tsdbFreeTableSchema(pTable);
if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) {
tdFreeSchema(pTable->tagSchema);
......@@ -1261,9 +1250,10 @@ static int tsdbEncodeTable(void **buf, STable *pTable) {
tlen += taosEncodeFixedU64(buf, TABLE_SUID(pTable));
tlen += tdEncodeKVRow(buf, pTable->tagVal);
} else {
tlen += taosEncodeFixedU8(buf, pTable->numOfSchemas);
for (int i = 0; i < pTable->numOfSchemas; i++) {
tlen += tdEncodeSchema(buf, pTable->schema[i]);
tlen += taosEncodeFixedU8(buf, (uint8_t)taosArrayGetSize(pTable->schema));
for (int i = 0; i < taosArrayGetSize(pTable->schema); i++) {
STSchema *pSchema = taosArrayGetP(pTable->schema, i);
tlen += tdEncodeSchema(buf, pSchema);
}
if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) {
......@@ -1294,9 +1284,12 @@ static void *tsdbDecodeTable(void *buf, STable **pRTable) {
buf = taosDecodeFixedU64(buf, &TABLE_SUID(pTable));
buf = tdDecodeKVRow(buf, &(pTable->tagVal));
} else {
buf = taosDecodeFixedU8(buf, &(pTable->numOfSchemas));
for (int i = 0; i < pTable->numOfSchemas; i++) {
buf = tdDecodeSchema(buf, &(pTable->schema[i]));
uint8_t nSchemas;
buf = taosDecodeFixedU8(buf, &nSchemas);
for (int i = 0; i < nSchemas; i++) {
STSchema *pSchema;
buf = tdDecodeSchema(buf, &pSchema);
tsdbAddSchema(pTable, pSchema);
}
if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) {
......@@ -1458,3 +1451,38 @@ static int tsdbCheckTableTagVal(SKVRow *pKVRow, STSchema *pSchema) {
return 0;
}
static int tsdbAddSchema(STable *pTable, STSchema *pSchema) {
ASSERT(TABLE_TYPE(pTable) != TSDB_CHILD_TABLE);
if (pTable->schema == NULL) {
pTable->schema = taosArrayInit(TSDB_MAX_TABLE_SCHEMAS, sizeof(SSchema *));
if (pTable->schema == NULL) {
terrno = TAOS_SYSTEM_ERROR(errno);
return -1;
}
}
ASSERT(taosArrayGetSize(pTable->schema) == 0 ||
schemaVersion(pSchema) > schemaVersion(*(STSchema **)taosArrayGetLast(pTable->schema)));
if (taosArrayPush(pTable->schema, &pSchema) == NULL) {
terrno = TAOS_SYSTEM_ERROR(errno);
return -1;
}
return 0;
}
static void tsdbFreeTableSchema(STable *pTable) {
ASSERT(pTable != NULL);
if (pTable->schema) {
for (size_t i = 0; i < taosArrayGetSize(pTable->schema); i++) {
STSchema *pSchema = taosArrayGetP(pTable->schema, i);
tdFreeSchema(pSchema);
}
taosArrayDestroy(pTable->schema);
}
}
\ No newline at end of file
......@@ -466,7 +466,7 @@ static STsdbQueryHandle* tsdbQueryTablesImpl(STsdbRepo* tsdb, STsdbQueryCond* pC
STsdbMeta* pMeta = tsdbGetMeta(tsdb);
assert(pMeta != NULL);
pQueryHandle->pDataCols = tdNewDataCols(pMeta->maxRowBytes, pMeta->maxCols, pQueryHandle->pTsdb->config.maxRowsPerFileBlock);
pQueryHandle->pDataCols = tdNewDataCols(pMeta->maxCols, pQueryHandle->pTsdb->config.maxRowsPerFileBlock);
if (pQueryHandle->pDataCols == NULL) {
tsdbError("%p failed to malloc buf for pDataCols, %"PRIu64, pQueryHandle, pQueryHandle->qId);
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
......@@ -1446,7 +1446,7 @@ static int doBinarySearchKey(char* pValue, int num, TSKEY key, int order) {
return midPos;
}
int32_t doCopyRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, int32_t capacity, int32_t numOfRows, int32_t start, int32_t end) {
static int32_t doCopyRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, int32_t capacity, int32_t numOfRows, int32_t start, int32_t end) {
char* pData = NULL;
int32_t step = ASCENDING_TRAVERSE(pQueryHandle->order)? 1 : -1;
......@@ -1481,7 +1481,7 @@ int32_t doCopyRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, int32_t capacity
pData = (char*)pColInfo->pData + (capacity - numOfRows - num) * pColInfo->info.bytes;
}
if (pColInfo->info.colId == src->colId) {
if (!isAllRowsNull(src) && pColInfo->info.colId == src->colId) {
if (pColInfo->info.type != TSDB_DATA_TYPE_BINARY && pColInfo->info.type != TSDB_DATA_TYPE_NCHAR) {
memmove(pData, (char*)src->pData + bytes * start, bytes * num);
} else { // handle the var-string
......
......@@ -42,14 +42,14 @@ int tsdbInitReadH(SReadH *pReadh, STsdbRepo *pRepo) {
return -1;
}
pReadh->pDCols[0] = tdNewDataCols(0, 0, pCfg->maxRowsPerFileBlock);
pReadh->pDCols[0] = tdNewDataCols(0, pCfg->maxRowsPerFileBlock);
if (pReadh->pDCols[0] == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
tsdbDestroyReadH(pReadh);
return -1;
}
pReadh->pDCols[1] = tdNewDataCols(0, 0, pCfg->maxRowsPerFileBlock);
pReadh->pDCols[1] = tdNewDataCols(0, pCfg->maxRowsPerFileBlock);
if (pReadh->pDCols[1] == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
tsdbDestroyReadH(pReadh);
......@@ -463,7 +463,7 @@ static int tsdbLoadBlockDataImpl(SReadH *pReadh, SBlock *pBlock, SDataCols *pDat
SDataCol *pDataCol = &(pDataCols->cols[dcol]);
if (dcol != 0 && ccol >= pBlockData->numOfCols) {
// Set current column as NULL and forward
dataColSetNEleNull(pDataCol, pBlock->numOfRows, pDataCols->maxPoints);
dataColReset(pDataCol);
dcol++;
continue;
}
......@@ -503,7 +503,7 @@ static int tsdbLoadBlockDataImpl(SReadH *pReadh, SBlock *pBlock, SDataCols *pDat
ccol++;
} else {
// Set current column as NULL and forward
dataColSetNEleNull(pDataCol, pBlock->numOfRows, pDataCols->maxPoints);
dataColReset(pDataCol);
dcol++;
}
}
......@@ -608,7 +608,7 @@ static int tsdbLoadBlockDataColsImpl(SReadH *pReadh, SBlock *pBlock, SDataCols *
}
if (pBlockCol == NULL) {
dataColSetNEleNull(pDataCol, pBlock->numOfRows, pDataCols->maxPoints);
dataColReset(pDataCol);
continue;
}
......
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import random
import string
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def genColList(self):
'''
generate column list
'''
col_list = list()
for i in range(1, 18):
col_list.append(f'c{i}')
return col_list
def genIncreaseValue(self, input_value):
'''
append ', 1' to the end of the value on each loop
'''
value_list = list(input_value)
value_list.insert(-1, ", 1")
return ''.join(value_list)
def insertAlter(self):
'''
after each alter and insert, executing 'select * from {tbname};' makes taosd core dump
'''
tbname = ''.join(random.choice(string.ascii_letters.lower()) for i in range(7))
input_value = '(now, 1)'
tdSql.execute(f'create table {tbname} (ts timestamp, c0 int);')
tdSql.execute(f'insert into {tbname} values {input_value};')
for col in self.genColList():
input_value = self.genIncreaseValue(input_value)
tdSql.execute(f'alter table {tbname} add column {col} int;')
tdSql.execute(f'insert into {tbname} values {input_value};')
tdSql.query(f'select * from {tbname};')
tdSql.checkRows(18)
def run(self):
tdSql.prepare()
self.insertAlter()
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
......@@ -167,11 +167,11 @@ python3 test.py -f tools/taosdemoTestInterlace.py
python3 test.py -f tools/taosdemoTestQuery.py
# nano support
#python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py
#python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py
#python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py
#python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertTime_step.py
#python3 test.py -f tools/taosdumpTestNanoSupport.py
python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py
python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuery.py
python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanosubscribe.py
python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestInsertTime_step.py
python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdumpTestNanoSupport.py
# update
python3 ./test.py -f update/allow_update.py
......@@ -338,6 +338,7 @@ python3 ./test.py -f functions/function_twa.py -r 1
python3 ./test.py -f functions/function_twa_test2.py
python3 ./test.py -f functions/function_stddev_td2555.py
python3 ./test.py -f functions/showOfflineThresholdIs864000.py
python3 ./test.py -f functions/function_interp.py
python3 ./test.py -f insert/metadataUpdate.py
python3 ./test.py -f query/last_cache.py
python3 ./test.py -f query/last_row_cache.py
......@@ -380,9 +381,10 @@ python3 ./test.py -f query/querySession.py
python3 test.py -f alter/alter_create_exception.py
python3 ./test.py -f insert/flushwhiledrop.py
python3 ./test.py -f insert/schemalessInsert.py
python3 ./test.py -f alter/alterColMultiTimes.py
#======================p4-end===============
# python3 test.py -f tools/taosdemoAllTest/pytest.py
......
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
import numpy as np
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.rowNum = 10
self.ts = 1537146000000
def run(self):
tdSql.prepare()
tdSql.execute("create table t(ts timestamp, k int)")
tdSql.execute("insert into t values('2021-1-1 1:1:1', 12);")
tdSql.query("select interp(*) from t where ts='2021-1-1 1:1:1'")
tdSql.checkRows(1)
tdSql.checkData(0, 1, 12)
tdSql.error("select interp(*) from t where ts >'2021-1-1 1:1:1' and ts < now interval(1s) fill(next)")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
......@@ -705,7 +705,7 @@ class TDTestCase:
case no id when stb exist
"""
self.cleanStb()
input_sql, stb_name = self.genFullTypeSql(t0="f", c0="f")
input_sql, stb_name = self.genFullTypeSql(tb_name="sub_table_0123456", t0="f", c0="f")
self.resCmp(input_sql, stb_name)
input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, id_noexist_tag=True, t0="f", c0="f")
self.resCmp(input_sql, stb_name, condition='where tbname like "t_%"')
......
8.855,"binary_str0" ,1626870128248246976
8.75,"binary_str1" ,1626870128249060032
5.44,"binary_str2" ,1626870128249067968
8.45,"binary_str3" ,1626870128249072064
4.07,"binary_str4" ,1626870128249075904
6.97,"binary_str5" ,1626870128249078976
6.86,"binary_str6" ,1626870128249082048
1.585,"binary_str7" ,1626870128249085120
1.4,"binary_str8" ,1626870128249087936
5.135,"binary_str9" ,1626870128249092032
3.15,"binary_str10" ,1626870128249095104
1.765,"binary_str11" ,1626870128249097920
7.71,"binary_str12" ,1626870128249100992
3.91,"binary_str13" ,1626870128249104064
5.615,"binary_str14" ,1626870128249106880
9.495,"binary_str15" ,1626870128249109952
3.825,"binary_str16" ,1626870128249113024
1.94,"binary_str17" ,1626870128249117120
5.385,"binary_str18" ,1626870128249119936
7.075,"binary_str19" ,1626870128249123008
5.715,"binary_str20" ,1626870128249126080
1.83,"binary_str21" ,1626870128249128896
6.365,"binary_str22" ,1626870128249131968
6.55,"binary_str23" ,1626870128249135040
6.315,"binary_str24" ,1626870128249138112
3.82,"binary_str25" ,1626870128249140928
2.455,"binary_str26" ,1626870128249145024
7.795,"binary_str27" ,1626870128249148096
2.47,"binary_str28" ,1626870128249150912
1.37,"binary_str29" ,1626870128249155008
5.39,"binary_str30" ,1626870128249158080
5.13,"binary_str31" ,1626870128249160896
4.09,"binary_str32" ,1626870128249163968
5.855,"binary_str33" ,1626870128249167040
0.17,"binary_str34" ,1626870128249170112
1.955,"binary_str35" ,1626870128249173952
0.585,"binary_str36" ,1626870128249178048
0.33,"binary_str37" ,1626870128249181120
7.925,"binary_str38" ,1626870128249183936
9.685,"binary_str39" ,1626870128249187008
2.6,"binary_str40" ,1626870128249191104
5.705,"binary_str41" ,1626870128249193920
3.965,"binary_str42" ,1626870128249196992
4.43,"binary_str43" ,1626870128249200064
8.73,"binary_str44" ,1626870128249202880
3.105,"binary_str45" ,1626870128249205952
9.39,"binary_str46" ,1626870128249209024
2.825,"binary_str47" ,1626870128249212096
9.675,"binary_str48" ,1626870128249214912
9.99,"binary_str49" ,1626870128249217984
4.51,"binary_str50" ,1626870128249221056
4.94,"binary_str51" ,1626870128249223872
7.72,"binary_str52" ,1626870128249226944
4.135,"binary_str53" ,1626870128249231040
2.325,"binary_str54" ,1626870128249234112
4.585,"binary_str55" ,1626870128249236928
8.76,"binary_str56" ,1626870128249240000
4.715,"binary_str57" ,1626870128249243072
0.56,"binary_str58" ,1626870128249245888
5.35,"binary_str59" ,1626870128249249984
5.075,"binary_str60" ,1626870128249253056
6.665,"binary_str61" ,1626870128249256128
7.13,"binary_str62" ,1626870128249258944
2.775,"binary_str63" ,1626870128249262016
5.775,"binary_str64" ,1626870128249265088
1.62,"binary_str65" ,1626870128249267904
1.625,"binary_str66" ,1626870128249270976
8.15,"binary_str67" ,1626870128249274048
0.75,"binary_str68" ,1626870128249277120
3.265,"binary_str69" ,1626870128249280960
8.585,"binary_str70" ,1626870128249284032
1.88,"binary_str71" ,1626870128249287104
8.44,"binary_str72" ,1626870128249289920
5.12,"binary_str73" ,1626870128249295040
2.58,"binary_str74" ,1626870128249298112
9.42,"binary_str75" ,1626870128249300928
1.765,"binary_str76" ,1626870128249304000
2.66,"binary_str77" ,1626870128249308096
1.405,"binary_str78" ,1626870128249310912
5.595,"binary_str79" ,1626870128249315008
2.28,"binary_str80" ,1626870128249318080
9.24,"binary_str81" ,1626870128249320896
9.03,"binary_str82" ,1626870128249323968
6.055,"binary_str83" ,1626870128249327040
1.74,"binary_str84" ,1626870128249330112
5.77,"binary_str85" ,1626870128249332928
1.97,"binary_str86" ,1626870128249336000
0.3,"binary_str87" ,1626870128249339072
7.145,"binary_str88" ,1626870128249342912
0.88,"binary_str89" ,1626870128249345984
8.025,"binary_str90" ,1626870128249349056
4.81,"binary_str91" ,1626870128249351872
0.725,"binary_str92" ,1626870128249355968
3.85,"binary_str93" ,1626870128249359040
9.455,"binary_str94" ,1626870128249362112
2.265,"binary_str95" ,1626870128249364928
3.985,"binary_str96" ,1626870128249368000
9.375,"binary_str97" ,1626870128249371072
0.2,"binary_str98" ,1626870128249373888
6.95,"binary_str99" ,1626870128249377984
"string0",7,8.615
"string1",4,9.895
"string2",3,2.92
"string3",3,5.62
"string4",7,1.615
"string5",6,1.45
"string6",5,7.48
"string7",7,3.01
"string8",5,4.76
"string9",10,7.09
"string10",2,8.38
"string11",7,8.65
"string12",5,5.025
"string13",10,5.765
"string14",2,4.57
"string15",2,1.03
"string16",7,6.98
"string17",10,0.23
"string18",7,5.815
"string19",1,2.37
"string20",10,8.865
"string21",3,1.235
"string22",2,8.62
"string23",9,1.045
"string24",8,4.34
"string25",1,5.455
"string26",2,4.475
"string27",1,6.95
"string28",2,3.39
"string29",3,6.79
"string30",7,9.735
"string31",1,9.79
"string32",10,9.955
"string33",1,5.095
"string34",3,3.86
"string35",9,5.105
"string36",10,4.22
"string37",1,2.78
"string38",9,6.345
"string39",1,0.975
"string40",5,6.16
"string41",4,7.735
"string42",5,6.6
"string43",8,2.845
"string44",1,0.655
"string45",3,2.995
"string46",9,3.6
"string47",8,3.47
"string48",3,7.98
"string49",6,2.225
"string50",9,5.44
"string51",4,6.335
"string52",3,2.955
"string53",1,0.565
"string54",6,5.575
"string55",6,9.905
"string56",9,6.025
"string57",8,0.94
"string58",10,0.15
"string59",8,1.555
"string60",4,2.28
"string61",2,8.29
"string62",9,6.22
"string63",6,3.35
"string64",10,6.7
"string65",3,9.345
"string66",7,9.815
"string67",1,5.365
"string68",10,3.81
"string69",1,6.405
"string70",8,2.715
"string71",3,8.58
"string72",8,6.34
"string73",2,7.49
"string74",4,8.64
"string75",3,8.995
"string76",7,3.465
"string77",1,7.64
"string78",6,3.65
"string79",6,1.4
"string80",6,5.875
"string81",2,1.22
"string82",5,7.87
"string83",9,8.41
"string84",9,8.9
"string85",9,3.89
"string86",2,5.0
"string87",2,4.495
"string88",4,2.835
"string89",3,5.895
"string90",7,8.41
"string91",5,5.125
"string92",7,9.165
"string93",5,8.315
"string94",10,7.485
"string95",7,4.635
"string96",2,6.015
"string97",8,0.595
"string98",3,8.79
"string99",4,1.72
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 10,
"thread_count_create_tbl": 10,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 100,
"num_of_records_per_req": 1000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "testdb3",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 50,
"blocks": 8,
"precision": "ms",
"keep": 3600,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 100,
"childtable_prefix": "tb0_",
"auto_create_table": "no",
"batch_create_tbl_num": 20,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 100,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1000,
"start_timestamp": "2021-07-01 00:00:00.000",
"sample_format": "",
"sample_file": "",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2},
{"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1},
{"type": "BOOL"},{"type": "NCHAR","len":16}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}]
}]
}]
}
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 10,
"thread_count_create_tbl": 10,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 100,
"num_of_records_per_req": 1000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "testdb1",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 50,
"blocks": 8,
"precision": "ns",
"keep": 3600,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 100,
"childtable_prefix": "tb0_",
"auto_create_table": "no",
"batch_create_tbl_num": 20,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 100,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1000,
"start_timestamp": "2021-07-01 00:00:00.000",
"sample_format": "",
"sample_file": "",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2},
{"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1},
{"type": "BOOL"},{"type": "NCHAR","len":16}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}]
}]
}]
}
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 10,
"thread_count_create_tbl": 10,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 100,
"num_of_records_per_req": 1000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "testdb2",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 50,
"blocks": 8,
"precision": "us",
"keep": 3600,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 100,
"childtable_prefix": "tb0_",
"auto_create_table": "no",
"batch_create_tbl_num": 20,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 100,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1000,
"start_timestamp": "2021-07-01 00:00:00.000",
"sample_format": "",
"sample_file": "",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2},
{"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1},
{"type": "BOOL"},{"type": "NCHAR","len":16}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}]
}]
}]
}
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root)-len("/build/bin")]
break
return buildPath
def run(self):
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosd not found!")
else:
tdLog.info("taosd found in %s" % buildPath)
binPath = buildPath+ "/build/bin/"
# insert: create one or multiple tables per sql and insert multiple rows per sql
# check taosdemo's time_step parameter with nanosecond precision
os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoInsertNanoDB.json -y " % binPath)
tdSql.execute("use testdb1")
tdSql.query("show stables")
tdSql.checkData(0, 4, 100)
tdSql.query("select count (tbname) from stb0")
tdSql.checkData(0, 0, 100)
tdSql.query("select count(*) from tb0_0")
tdSql.checkData(0, 0, 100)
tdSql.query("select count(*) from stb0")
tdSql.checkData(0, 0, 10000)
tdSql.query("describe stb0")
tdSql.getData(9, 1)
tdSql.checkDataType(9, 1,"TIMESTAMP")
tdSql.query("select last(ts) from stb0")
tdSql.checkData(0, 0,"2021-07-01 00:00:00.000099000")
# check taosdemo's time_step parameter with microsecond precision
os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoInsertUSDB.json -y " % binPath)
tdSql.execute("use testdb2")
tdSql.query("show stables")
tdSql.checkData(0, 4, 100)
tdSql.query("select count (tbname) from stb0")
tdSql.checkData(0, 0, 100)
tdSql.query("select count(*) from tb0_0")
tdSql.checkData(0, 0, 100)
tdSql.query("select count(*) from stb0")
tdSql.checkData(0, 0, 10000)
tdSql.query("describe stb0")
tdSql.getData(9, 1)
tdSql.checkDataType(9, 1,"TIMESTAMP")
tdSql.query("select last(ts) from stb0")
tdSql.checkData(0, 0,"2021-07-01 00:00:00.099000")
# check taosdemo's time_step parameter with millisecond precision
os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoInsertMSDB.json -y " % binPath)
tdSql.execute("use testdb3")
tdSql.query("show stables")
tdSql.checkData(0, 4, 100)
tdSql.query("select count (tbname) from stb0")
tdSql.checkData(0, 0, 100)
tdSql.query("select count(*) from tb0_0")
tdSql.checkData(0, 0, 100)
tdSql.query("select count(*) from stb0")
tdSql.checkData(0, 0, 10000)
tdSql.query("describe stb0")
tdSql.checkDataType(9, 1,"TIMESTAMP")
tdSql.query("select last(ts) from stb0")
tdSql.checkData(0, 0,"2021-07-01 00:01:39.000")
os.system("rm -rf ./res.txt")
os.system("rm -rf ./*.py.sql")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 10,
"thread_count_create_tbl": 10,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 100,
"num_of_records_per_req": 1000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "nsdb",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 50,
"blocks": 8,
"precision": "ns",
"keep": 3600,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 100,
"childtable_prefix": "tb0_",
"auto_create_table": "no",
"batch_create_tbl_num": 20,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 100,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 10000000,
"start_timestamp": "2021-07-01 00:00:00.000",
"sample_format": "",
"sample_file": "",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2},
{"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1},
{"type": "BOOL"},{"type": "NCHAR","len":16}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}]
},
{
"name": "stb1",
"child_table_exists":"no",
"childtable_count": 100,
"childtable_prefix": "tb1_",
"auto_create_table": "no",
"batch_create_tbl_num": 20,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 100,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 10,
"disorder_range": 1000,
"timestamp_step": 10000000,
"start_timestamp": "2021-07-01 00:00:00.000",
"sample_format": "",
"sample_file": "",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2},
{"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1},
{"type": "BOOL"},{"type": "NCHAR","len":16}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}]
}]
}]
}
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 10,
"thread_count_create_tbl": 10,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 100,
"num_of_records_per_req": 1000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "subnsdb",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 50,
"blocks": 8,
"precision": "ns",
"keep": 3600,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 10,
"childtable_prefix": "tb0_",
"auto_create_table": "no",
"batch_create_tbl_num": 20,
"data_source": "samples",
"insert_mode": "taosc",
"insert_rows": 10,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 10000000,
"start_timestamp": "2021-07-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./tools/taosdemoAllTest/NanoTestCase/nano_samples.csv",
"tags_file": "./tools/taosdemoAllTest/NanoTestCase/nano_sampletags.csv",
"columns": [{"type": "DOUBLE"}, {"type": "BINARY", "len": 64, "count":1}, {"type": "TIMESTAMP", "count":1}],
"tags": [{"type": "BINARY", "len": 16, "count":1},{"type": "INT"},{"type": "DOUBLE"}]
},
{
"name": "stb1",
"child_table_exists":"no",
"childtable_count": 10,
"childtable_prefix": "tb1_",
"auto_create_table": "no",
"batch_create_tbl_num": 20,
"data_source": "samples",
"insert_mode": "taosc",
"insert_rows": 10,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 10,
"disorder_range": 1000,
"timestamp_step": 10000000,
"start_timestamp": "2021-07-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./tools/taosdemoAllTest/NanoTestCase/nano_samples.csv",
"tags_file": "./tools/taosdemoAllTest/NanoTestCase/nano_sampletags.csv",
"columns": [{"type": "DOUBLE"}, {"type": "BINARY", "len": 64, "count":1}, {"type": "TIMESTAMP", "count":1}],
"tags": [{"type": "BINARY", "len": 16, "count":1},{"type": "INT"},{"type": "DOUBLE"}]
}]
}]
}
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 10,
"thread_count_create_tbl": 10,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 100,
"num_of_records_per_req": 1000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "nsdb2",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 50,
"blocks": 8,
"precision": "ns",
"keep": 3600,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 100,
"childtable_prefix": "tb0_",
"auto_create_table": "no",
"batch_create_tbl_num": 20,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 100,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 10,
"start_timestamp": "now",
"sample_format": "",
"sample_file": "",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2},
{"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1},
{"type": "BOOL"},{"type": "NCHAR","len":16}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}]
}]
}]
}
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 10,
"thread_count_create_tbl": 10,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 100,
"num_of_records_per_req": 1000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "nsdbcsv",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 50,
"blocks": 8,
"precision": "ns",
"keep": 3600,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 100,
"childtable_prefix": "tb0_",
"auto_create_table": "no",
"batch_create_tbl_num": 20,
"data_source": "samples",
"insert_mode": "taosc",
"insert_rows": 100,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 10000000,
"start_timestamp": "2021-07-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./tools/taosdemoAllTest/NanoTestCase/nano_samples.csv",
"tags_file": "./tools/taosdemoAllTest/NanoTestCase/nano_sampletags.csv",
"columns": [{"type": "DOUBLE"}, {"type": "BINARY", "len": 64, "count":1}, {"type": "TIMESTAMP", "count":1}],
"tags": [{"type": "BINARY", "len": 16, "count":1},{"type": "INT"},{"type": "DOUBLE"}]
},
{
"name": "stb1",
"child_table_exists":"no",
"childtable_count": 100,
"childtable_prefix": "tb1_",
"auto_create_table": "no",
"batch_create_tbl_num": 20,
"data_source": "samples",
"insert_mode": "taosc",
"insert_rows": 100,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 10,
"disorder_range": 1000,
"timestamp_step": 10000000,
"start_timestamp": "2021-07-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./tools/taosdemoAllTest/NanoTestCase/nano_samples.csv",
"tags_file": "./tools/taosdemoAllTest/NanoTestCase/nano_sampletags.csv",
"columns": [{"type": "DOUBLE"}, {"type": "BINARY", "len": 64, "count":1}, {"type": "TIMESTAMP", "count":1}],
"tags": [{"type": "BINARY", "len": 16, "count":1},{"type": "INT"},{"type": "DOUBLE"}]
}]
}]
}
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root)-len("/build/bin")]
break
return buildPath
def run(self):
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosd not found!")
else:
tdLog.info("taosd found in %s" % buildPath)
binPath = buildPath+ "/build/bin/"
# insert: create one or multiple tables per sql and insert multiple rows per sql
# insert data starting from a specified timestamp
# check stable stb0
os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabase.json -y " % binPath)
tdSql.execute("use nsdb")
tdSql.query("show stables")
tdSql.checkData(0, 4, 100)
tdSql.query("select count (tbname) from stb0")
tdSql.checkData(0, 0, 100)
tdSql.query("select count(*) from tb0_0")
tdSql.checkData(0, 0, 100)
tdSql.query("select count(*) from stb0")
tdSql.checkData(0, 0, 10000)
tdSql.query("describe stb0")
tdSql.checkDataType(9, 1,"TIMESTAMP")
tdSql.query("select last(ts) from stb0")
tdSql.checkData(0, 0,"2021-07-01 00:00:00.990000000")
# check stable stb1, which is inserted with disorder
tdSql.query("select count (tbname) from stb1")
tdSql.checkData(0, 0, 100)
tdSql.query("select count(*) from tb1_0")
tdSql.checkData(0, 0, 100)
tdSql.query("select count(*) from stb1")
tdSql.checkData(0, 0, 10000)
# check that c8 is a nanosecond-precision timestamp
tdSql.query("describe stb1")
tdSql.checkDataType(9, 1,"TIMESTAMP")
# check that the insert timestamp_step is in nanoseconds
tdSql.query("select last(ts) from stb1")
tdSql.checkData(0, 0,"2021-07-01 00:00:00.990000000")
# insert data starting from the current time
# check stable stb0
os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseNow.json -y " % binPath)
tdSql.execute("use nsdb2")
tdSql.query("show stables")
tdSql.checkData(0, 4, 100)
tdSql.query("select count (tbname) from stb0")
tdSql.checkData(0, 0, 100)
tdSql.query("select count(*) from tb0_0")
tdSql.checkData(0, 0, 100)
tdSql.query("select count(*) from stb0")
tdSql.checkData(0, 0, 10000)
# check that c8 is a nanosecond-precision timestamp
tdSql.query("describe stb0")
tdSql.checkDataType(9,1,"TIMESTAMP")
# insert from csv files where the timestamp is a long integer and ts/cols contain strings
os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabasecsv.json -y " % binPath)
tdSql.execute("use nsdbcsv")
tdSql.query("show stables")
tdSql.checkData(0, 4, 100)
tdSql.query("select count(*) from stb0")
tdSql.checkData(0, 0, 10000)
tdSql.query("describe stb0")
tdSql.checkDataType(3, 1, "TIMESTAMP")
tdSql.query("select count(*) from stb0 where ts > \"2021-07-01 00:00:00.490000000\"")
tdSql.checkData(0, 0, 5000)
tdSql.query("select count(*) from stb0 where ts < 1626918583000000000")
tdSql.checkData(0, 0, 10000)
os.system("rm -rf ./insert_res.txt")
os.system("rm -rf tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNano*.py.sql")
# taosdemo insert test via command-line parameters; for details see taosdemo --help
os.system("%staosdemo -u root -P taosdata -p 6030 -a 1 -m pre -n 10 -T 20 -t 60 -o res.txt -y " % binPath)
tdSql.query("select count(*) from test.meters")
tdSql.checkData(0, 0, 600)
# check taosdemo -s
sqls_ls = ['drop database if exists nsdbsql;','create database nsdbsql precision "ns" keep 3600 days 6 update 1;',
'use nsdbsql;','CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupdId int);',
'CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2);',
'INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32);',
'INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 85, 32, 0.76);']
with open("./taosdemoTestNanoCreateDB.sql",mode ="a" ) as sql_files:
for sql in sqls_ls:
sql_files.write(sql+"\n")
sql_files.close()
sleep(10)
os.system("%staosdemo -s taosdemoTestNanoCreateDB.sql -y " % binPath)
tdSql.query("select count(*) from nsdbsql.meters")
tdSql.checkData(0, 0, 2)
os.system("rm -rf ./res.txt")
os.system("rm -rf ./*.py.sql")
os.system("rm -rf ./taosdemoTestNanoCreateDB.sql")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
{
"filetype": "query",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"confirm_parameter_prompt": "no",
"databases": "nsdb",
"query_times": 10,
"query_mode": "taosc",
"specified_table_query": {
"query_interval": 1,
"concurrent": 2,
"sqls": [
{
"sql": "select count(*) from stb0 where ts>\"2021-07-01 00:01:00.000000000 \" ;",
"result": "./query_res0.txt"
},
{
"sql": "select count(*) from stb0 where ts>\"2021-07-01 00:01:00.000000000\" and ts <=\"2021-07-01 00:01:10.000000000\" ;",
"result": "./query_res1.txt"
},
{
"sql": "select count(*) from stb0 where ts>now-20d ;",
"result": "./query_res2.txt"
},
{
"sql": "select max(c10) from stb0;",
"result": "./query_res3.txt"
},
{
"sql": "select min(c1) from stb0;",
"result": "./query_res4.txt"
},
{
"sql": "select avg(c1) from stb0;",
"result": "./query_res5.txt"
},
{
"sql":"select count(*) from stb0 group by tbname;",
"result":"./query_res6.txt"
}
]
},
"super_table_query": {
"stblname": "stb0",
"query_interval": 0,
"threads": 4,
"sqls": [
{
"sql": "select count(*) from xxxx where ts>\"2021-07-01 00:01:00.000000000 \" ;",
"result": "./query_res_tb0.txt"
},
{
"sql":"select count(*) from xxxx where ts>\"2021-07-01 00:01:00.000000000\" and ts <=\"2021-07-01 00:01:10.000000000\" ;",
"result": "./query_res_tb1.txt"
},
{
"sql":"select first(*) from xxxx ;",
"result": "./query_res_tb2.txt"
},
{
"sql":"select last(*) from xxxx;",
"result": "./query_res_tb3.txt"
},
{
"sql":"select last_row(*) from xxxx ;",
"result": "./query_res_tb4.txt"
},
{
"sql":"select max(c10) from xxxx ;",
"result": "./query_res_tb5.txt"
},
{
"sql":"select min(c1) from xxxx ;",
"result": "./query_res_tb6.txt"
},
{
"sql":"select avg(c10) from xxxx ;",
"result": "./query_res_tb7.txt"
}
]
}
}
\ No newline at end of file
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root)-len("/build/bin")]
break
return buildPath
def run(self):
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosd not found!")
else:
tdLog.info("taosd found in %s" % buildPath)
binPath = buildPath+ "/build/bin/"
# query: nanosecond-precision query test with where, max/min, group by and order
os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabase.json -y " % binPath)
tdSql.execute("use nsdb")
# use where to filter
tdSql.query("select count(*) from stb0 where ts>\"2021-07-01 00:00:00.590000000 \" ")
tdSql.checkData(0, 0, 4000)
tdSql.query("select count(*) from stb0 where ts>\"2021-07-01 00:00:00.000000000\" and ts <=\"2021-07-01 00:00:00.590000000\" ")
tdSql.checkData(0, 0, 5900)
tdSql.query("select count(*) from tb0_0 where ts>\"2021-07-01 00:00:00.590000000 \" ;")
tdSql.checkData(0, 0, 40)
tdSql.query("select count(*) from tb0_0 where ts>\"2021-07-01 00:00:00.000000000\" and ts <=\"2021-07-01 00:00:00.590000000\" ")
tdSql.checkData(0, 0, 59)
# select max/min/avg on specific columns
tdSql.query("select max(c10) from stb0;")
print("select max(c10) from stb0 : " , tdSql.getData(0, 0))
tdSql.query("select max(c10) from tb0_0;")
print("select max(c10) from tb0_0 : " , tdSql.getData(0, 0))
tdSql.query("select min(c1) from stb0;")
print( "select min(c1) from stb0 : " , tdSql.getData(0, 0))
tdSql.query("select min(c1) from tb0_0;")
print( "select min(c1) from tb0_0 : " , tdSql.getData(0, 0))
tdSql.query("select avg(c1) from stb0;")
print( "select avg(c1) from stb0 : " , tdSql.getData(0, 0))
tdSql.query("select avg(c1) from tb0_0;")
print( "select avg(c1) from tb0_0 : " , tdSql.getData(0, 0))
tdSql.query("select count(*) from stb0 group by tbname;")
tdSql.checkData(0, 0, 100)
tdSql.checkData(10, 0, 100)
# query: run the above sqls via taosdemo continuously
os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuery.json -y " % binPath)
os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabasecsv.json -y " % binPath)
tdSql.execute("use nsdbcsv")
tdSql.query("show stables")
tdSql.checkData(0, 4, 100)
tdSql.query("select count(*) from stb0")
tdSql.checkData(0, 0, 10000)
tdSql.query("describe stb0")
tdSql.checkDataType(3, 1, "TIMESTAMP")
tdSql.query("select count(*) from stb0 where ts >\"2021-07-01 00:00:00.490000000\"")
tdSql.checkData(0, 0, 5000)
tdSql.query("select count(*) from stb0 where ts <now -1d-1h-3s")
tdSql.checkData(0, 0, 10000)
tdSql.query("select count(*) from stb0 where ts < 1626918583000000000")
tdSql.checkData(0, 0, 10000)
tdSql.execute('select count(*) from stb0 where c2 > 162687012800000000')
tdSql.execute('select count(*) from stb0 where c2 < 162687012800000000')
tdSql.execute('select count(*) from stb0 where c2 = 162687012800000000')
tdSql.execute('select count(*) from stb0 where c2 != 162687012800000000')
tdSql.execute('select count(*) from stb0 where c2 <> 162687012800000000')
tdSql.execute('select count(*) from stb0 where c2 > "2021-07-21 20:22:08.248246976"')
tdSql.execute('select count(*) from stb0 where c2 < "2021-07-21 20:22:08.248246976"')
tdSql.execute('select count(*) from stb0 where c2 = "2021-07-21 20:22:08.248246976"')
tdSql.execute('select count(*) from stb0 where c2 != "2021-07-21 20:22:08.248246976"')
tdSql.execute('select count(*) from stb0 where c2 <> "2021-07-21 20:22:08.248246976"')
tdSql.execute('select count(*) from stb0 where ts between "2021-07-01 00:00:00.000000000" and "2021-07-01 00:00:00.990000000"')
tdSql.execute('select count(*) from stb0 where ts between 1625068800000000000 and 1625068801000000000')
tdSql.query('select avg(c0) from stb0 interval(5000000000b)')
tdSql.checkRows(1)
tdSql.query('select avg(c0) from stb0 interval(100000000b)')
tdSql.checkRows(10)
tdSql.error('select avg(c0) from stb0 interval(1b)')
tdSql.error('select avg(c0) from stb0 interval(999b)')
tdSql.query('select avg(c0) from stb0 interval(1000b)')
tdSql.checkRows(100)
tdSql.query('select avg(c0) from stb0 interval(1u)')
tdSql.checkRows(100)
tdSql.query('select avg(c0) from stb0 interval(100000000b) sliding (100000000b)')
tdSql.checkRows(10)
# query: run the above sqls via taosdemo continuously
os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuerycsv.json -y " % binPath)
os.system("rm -rf ./query_res*.txt*")
os.system("rm -rf tools/taosdemoAllTest/NanoTestCase/*.py.sql")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
{
    "filetype": "query",
    "cfgdir": "/etc/taos",
    "host": "127.0.0.1",
    "port": 6030,
    "user": "root",
    "password": "taosdata",
    "confirm_parameter_prompt": "no",
    "databases": "nsdbcsv",
    "query_times": 10,
    "query_mode": "taosc",
    "specified_table_query": {
        "query_interval": 1,
        "concurrent": 2,
        "sqls": [
            {
                "sql": "select count(*) from stb0 where ts> \"2021-07-01 00:00:00.490000000\" ;",
                "result": "./query_res0.txt"
            },
            {
                "sql": "select count(*) from stb0 where ts < now -22d-1h-3s ;",
                "result": "./query_res1.txt"
            },
            {
                "sql": "select count(*) from stb0 where ts < 1626918583000000000 ;",
                "result": "./query_res2.txt"
            },
            {
                "sql": "select count(*) from stb0 where c2 <> 162687012800000000;",
                "result": "./query_res3.txt"
            },
            {
                "sql": "select count(*) from stb0 where c2 != \"2021-07-21 20:22:08.248246976\";",
                "result": "./query_res4.txt"
            },
            {
                "sql": "select count(*) from stb0 where ts between \"2021-07-01 00:00:00.000000000\" and \"2021-07-01 00:00:00.990000000\";",
                "result": "./query_res5.txt"
            },
            {
                "sql": "select count(*) from stb0 group by tbname;",
                "result": "./query_res6.txt"
            },
            {
                "sql": "select count(*) from stb0 where ts between 1625068800000000000 and 1625068801000000000;",
                "result": "./query_res7.txt"
            },
            {
                "sql": "select avg(c0) from stb0 interval(5000000000b);",
                "result": "./query_res8.txt"
            },
            {
                "sql": "select avg(c0) from stb0 interval(100000000b) sliding (100000000b);",
                "result": "./query_res9.txt"
            }
        ]
    },
    "super_table_query": {
        "stblname": "stb0",
        "query_interval": 0,
        "threads": 4,
        "sqls": [
            {
                "sql": "select count(*) from xxxx where ts > \"2021-07-01 00:00:00.490000000\" ;",
                "result": "./query_res_tb0.txt"
            },
            {
                "sql": "select count(*) from xxxx where ts between \"2021-07-01 00:00:00.000000000\" and \"2021-07-01 00:00:00.990000000\" ;",
                "result": "./query_res_tb1.txt"
            },
            {
                "sql": "select first(*) from xxxx ;",
                "result": "./query_res_tb2.txt"
            },
            {
                "sql": "select last(*) from xxxx;",
                "result": "./query_res_tb3.txt"
            },
            {
                "sql": "select last_row(*) from xxxx ;",
                "result": "./query_res_tb4.txt"
            },
            {
                "sql": "select max(c0) from xxxx ;",
                "result": "./query_res_tb5.txt"
            },
            {
                "sql": "select min(c0) from xxxx ;",
                "result": "./query_res_tb6.txt"
            },
            {
                "sql": "select avg(c0) from xxxx ;",
                "result": "./query_res_tb7.txt"
            },
            {
                "sql": "select avg(c0) from xxxx interval(100000000b) sliding (100000000b) ;",
                "result": "./query_res_tb8.txt"
            }
        ]
    }
}
\ No newline at end of file
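In the super_table_query section above, taosdemo issues each statement once per child table of "stblname", substituting the child-table name for the "xxxx" placeholder. A minimal illustrative sketch of that expansion (the child-table names tb0_0 .. tb0_99 are an assumption taken from the checks in the test above, not from this config):

template = 'select count(*) from xxxx where ts > "2021-07-01 00:00:00.490000000" ;'
for i in range(3):  # first few child tables only, for illustration
    print(template.replace("xxxx", "tb0_%d" % i))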
{
    "filetype": "subscribe",
    "cfgdir": "/etc/taos",
    "host": "127.0.0.1",
    "port": 6030,
    "user": "root",
    "password": "taosdata",
    "databases": "subnsdb",
    "confirm_parameter_prompt": "no",
    "specified_table_query": {
        "concurrent": 2,
        "mode": "sync",
        "interval": 10000,
        "restart": "yes",
        "keepProgress": "yes",
        "sqls": [
            {
                "sql": "select * from stb0 where ts>= \"2021-07-01 00:00:00.000000000\" ;",
                "result": "./subscribe_res0.txt"
            },
            {
                "sql": "select * from stb0 where ts < now -2d-1h-3s ;",
                "result": "./subscribe_res1.txt"
            },
            {
                "sql": "select * from stb0 where ts < 1626918583000000000 ;",
                "result": "./subscribe_res2.txt"
            }
        ]
    }
}
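With "concurrent": 2, each consumer appends the rows it receives to its own file matching the configured result prefix, which is why the test below concatenates subscribe_res0.txt* before counting lines. A minimal stand-alone sketch of that merge-and-count step (file names as configured above; the per-consumer naming is inferred from the test's use of `cat`):

import glob

total = 0
for path in glob.glob("./subscribe_res0.txt*"):  # one result file per consumer
    with open(path) as f:
        total += sum(1 for _ in f)
print("rows delivered for the first subscription:", total)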
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
import time
from datetime import datetime
import subprocess
class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

    def getBuildPath(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))

        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]

        buildPath = ""  # default so run() can detect a missing taosd build instead of raising UnboundLocalError
        for root, dirs, files in os.walk(projPath):
            if ("taosd" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath
    # get the number of rows a subscription has written to its result file so far
    def subTimes(self, filename):
        self.filename = filename
        command = 'cat %s | wc -l' % filename
        times = int(subprocess.getstatusoutput(command)[1])
        return times

    # assert that the observed result matches the expected result
    def assertCheck(self, filename, subResult, expectResult):
        self.filename = filename
        self.subResult = subResult
        self.expectResult = expectResult
        args0 = (filename, subResult, expectResult)
        assert subResult == expectResult, "Queryfile: %s, result is %s != expect: %s" % args0
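    # Optional, dependency-free alternative to subTimes() above (illustrative sketch only,
    # not used by this test): count the received rows in Python instead of cat | wc -l.
    def subTimesPy(self, filename):
        with open(filename) as f:
            return sum(1 for _ in f)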
    def run(self):
        buildPath = self.getBuildPath()
        if (buildPath == ""):
            tdLog.exit("taosd not found!")
        else:
            tdLog.info("taosd found in %s" % buildPath)
        binPath = buildPath + "/build/bin/"

        # clear any environment left over from a previous run
        os.system("ps -ef |grep 'taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json' |grep -v 'grep' |awk '{print $2}'|xargs kill -9")
        os.system("rm -rf ./subscribe_res*")
        os.system("rm -rf ./all_subscribe_res*")

        # insert data, then start the subscriber in the background
        os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseInsertForSub.json" % binPath)
        os.system("nohup %staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoSubscribe.json &" % binPath)
        query_pid = int(subprocess.getstatusoutput('ps aux|grep "taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoSubscribe.json" |grep -v "grep"|awk \'{print $2}\'')[1])

        # merge the per-consumer result files
        sleep(5)
        os.system("cat subscribe_res0.txt* > all_subscribe_res0.txt")
        os.system("cat subscribe_res1.txt* > all_subscribe_res1.txt")
        os.system("cat subscribe_res2.txt* > all_subscribe_res2.txt")

        # check that each subscription received the expected number of rows
        subTimes0 = self.subTimes("all_subscribe_res0.txt")
        self.assertCheck("all_subscribe_res0.txt", subTimes0, 200)
        subTimes1 = self.subTimes("all_subscribe_res1.txt")
        self.assertCheck("all_subscribe_res1.txt", subTimes1, 200)
        subTimes2 = self.subTimes("all_subscribe_res2.txt")
        self.assertCheck("all_subscribe_res2.txt", subTimes2, 200)
        # insert one extra row and make sure the subscribers pick it up
        tdSql.execute("use subnsdb")
        tdSql.execute("insert into tb0_0 values(now,100.1000,'subtest1',now-1s)")
        sleep(15)

        os.system("cat subscribe_res0.txt* > all_subscribe_res0.txt")
        subTimes0 = self.subTimes("all_subscribe_res0.txt")
        print("pass")
        self.assertCheck("all_subscribe_res0.txt", subTimes0, 202)

        # stop the background taosdemo subscriber and clean up
        os.system("kill -9 %d" % query_pid)
        sleep(3)
        os.system("rm -rf ./subscribe_res*")
        os.system("rm -rf ./all_subscribe*")
        os.system("rm -rf ./*.py.sql")

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())