Commit 608f2394 authored by S slguan

Merge remote-tracking branch 'origin/develop' into feature/alter

@@ -46,10 +46,10 @@ matrix:
 pip3 install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python3/
 cd ${TRAVIS_BUILD_DIR}/tests
-./test-all.sh || travis_terminate $?
+./test-all.sh $TRAVIS_EVENT_TYPE || travis_terminate $?
 cd ${TRAVIS_BUILD_DIR}/tests/pytest
-./simpletest.sh -g 2>&1 | tee mem-error-out.txt
+./smoketest.sh -g 2>&1 | tee mem-error-out.txt
 sleep 1
 # Color setting
@@ -86,13 +86,12 @@ matrix:
 addons:
 coverity_scan:
 # GitHub project metadata
 # ** specific to your project **
 project:
 name: TDengine
 version: 2.x
-description: taosdata/TDengine
+description: TDengine
 # Where email notification of build analysis results will be sent
 notification_email: sdsang@taosdata.com
......
@@ -25,6 +25,7 @@ extern "C" {
 */
#include "os.h"
#include "tbuffer.h"
+#include "exception.h"
#include "qextbuffer.h"
#include "taosdef.h"
#include "tscSecondaryMerge.h"
@@ -177,7 +178,7 @@ bool tscValidateColumnId(STableMetaInfo* pTableMetaInfo, int32_t colId);
// get starter position of metric query condition (query on tags) in SSqlCmd.payload
SCond* tsGetSTableQueryCond(STagCond* pCond, uint64_t uid);
-void tsSetSTableQueryCond(STagCond* pTagCond, uint64_t uid, SBuffer* pBuf);
+void tsSetSTableQueryCond(STagCond* pTagCond, uint64_t uid, SBufferWriter* bw);
void tscTagCondCopy(STagCond* dest, const STagCond* src);
void tscTagCondRelease(STagCond* pCond);
......
@@ -1185,10 +1185,18 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSel
    return invalidSqlErrMsg(pQueryInfo->msg, "invalid arithmetic expression in select clause");
  }
-  SBuffer buf = exprTreeToBinary(pNode);
+  SBufferWriter bw = tbufInitWriter(NULL, false);
+  TRY(0) {
+    exprTreeToBinary(&bw, pNode);
+  } CATCH(code) {
+    tbufCloseWriter(&bw);
+    UNUSED(code);
+    // TODO: other error handling
+  } END_TRY

-  size_t len = tbufTell(&buf);
-  char* c = tbufGetData(&buf, true);
+  size_t len = tbufTell(&bw);
+  char* c = tbufGetData(&bw, true);

  // set the serialized binary string as the parameter of arithmetic expression
  addExprParams(pExpr, c, TSDB_DATA_TYPE_BINARY, len, index.tableIndex);
@@ -3751,7 +3759,15 @@ static int32_t getTagQueryCondExpr(SQueryInfo* pQueryInfo, SCondExpr* pCondExpr,
  SArray* colList = taosArrayInit(10, sizeof(SColIndex));
  ret = exprTreeFromSqlExpr(&p, p1, NULL, pQueryInfo, colList);

-  SBuffer buf = exprTreeToBinary(p);
+  SBufferWriter bw = tbufInitWriter(NULL, false);
+  TRY(0) {
+    exprTreeToBinary(&bw, p);
+  } CATCH(code) {
+    tbufCloseWriter(&bw);
+    UNUSED(code);
+    // TODO: more error handling
+  } END_TRY

  // add to source column list
  STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, i);
@@ -3765,7 +3781,7 @@ static int32_t getTagQueryCondExpr(SQueryInfo* pQueryInfo, SCondExpr* pCondExpr,
    addRequiredTagColumn(pTableMetaInfo, &index);
  }

-  tsSetSTableQueryCond(&pQueryInfo->tagCond, uid, &buf);
+  tsSetSTableQueryCond(&pQueryInfo->tagCond, uid, &bw);
  doCompactQueryExpr(pExpr);
  tSQLExprDestroy(p1);
......
@@ -47,18 +47,18 @@ SCond* tsGetSTableQueryCond(STagCond* pTagCond, uint64_t uid) {
  return NULL;
}

-void tsSetSTableQueryCond(STagCond* pTagCond, uint64_t uid, SBuffer* pBuf) {
-  if (tbufTell(pBuf) == 0) {
+void tsSetSTableQueryCond(STagCond* pTagCond, uint64_t uid, SBufferWriter* bw) {
+  if (tbufTell(bw) == 0) {
    return;
  }

  SCond cond = {
    .uid = uid,
-   .len = tbufTell(pBuf),
+   .len = tbufTell(bw),
    .cond = NULL,
  };

- cond.cond = tbufGetData(pBuf, true);
+ cond.cond = tbufGetData(bw, true);

  if (pTagCond->pCond == NULL) {
    pTagCond->pCond = taosArrayInit(3, sizeof(SCond));
......
@@ -90,9 +90,10 @@ void tSQLBinaryExprTrv(tExprNode *pExprs, SArray* res);
uint8_t getBinaryExprOptr(SSQLToken *pToken);

-SBuffer exprTreeToBinary(tExprNode* pExprTree);
+void tExprNodeDestroy(tExprNode *pNode, void (*fp)(void *));
+void exprTreeToBinary(SBufferWriter* bw, tExprNode* pExprTree);

-tExprNode* exprTreeFromBinary(const void* pBuf, size_t size);
+tExprNode* exprTreeFromBinary(const void* data, size_t size);
tExprNode* exprTreeFromTableName(const char* tbnameCond);

#ifdef __cplusplus
......
@@ -31,6 +31,7 @@
#include "tskiplist.h"
#include "queryLog.h"
#include "tsdbMain.h"
+#include "exception.h"

/*
 *
@@ -44,7 +45,6 @@
 *
 */
static tExprNode *tExprNodeCreate(SSchema *pSchema, int32_t numOfCols, SSQLToken *pToken);
-static void tExprNodeDestroy(tExprNode *pNode, void (*fp)(void *));
static tExprNode *createSyntaxTree(SSchema *pSchema, int32_t numOfCols, char *str, int32_t *i);
static void destroySyntaxTree(tExprNode *);
@@ -428,7 +428,7 @@ void tSQLBinaryExprToString(tExprNode *pExpr, char *dst, int32_t *len) {
static void UNUSED_FUNC destroySyntaxTree(tExprNode *pNode) { tExprNodeDestroy(pNode, NULL); }

-static void tExprNodeDestroy(tExprNode *pNode, void (*fp)(void *)) {
+void tExprNodeDestroy(tExprNode *pNode, void (*fp)(void *)) {
  if (pNode == NULL) {
    return;
  }
@@ -1023,104 +1023,116 @@ void tSQLBinaryExprTrv(tExprNode *pExprs, SArray* res) {
  }
}

-static void exprTreeToBinaryImpl(tExprNode* pExprTree, SBuffer* pBuf) {
-  tbufWrite(pBuf, &pExprTree->nodeType, sizeof(pExprTree->nodeType));
-  if (pExprTree->nodeType == TSQL_NODE_VALUE) {
-    tVariant* pVal = pExprTree->pVal;
-    tbufWrite(pBuf, &pVal->nType, sizeof(pVal->nType));
-    if (pVal->nType == TSDB_DATA_TYPE_BINARY) {
-      tbufWrite(pBuf, &pVal->nLen, sizeof(pVal->nLen));
-      tbufWrite(pBuf, pVal->pz, pVal->nLen);
-    } else {
-      tbufWrite(pBuf, &pVal->pz, sizeof(pVal->i64Key));
-    }
-  } else if (pExprTree->nodeType == TSQL_NODE_COL) {
-    SSchema* pSchema = pExprTree->pSchema;
-    tbufWrite(pBuf, &pSchema->colId, sizeof(pSchema->colId));
-    tbufWrite(pBuf, &pSchema->bytes, sizeof(pSchema->bytes));
-    tbufWrite(pBuf, &pSchema->type, sizeof(pSchema->type));
-    int32_t len = strlen(pSchema->name);
-    tbufWriteStringLen(pBuf, pSchema->name, len);
-  } else if (pExprTree->nodeType == TSQL_NODE_EXPR) {
-    tbufWrite(pBuf, &pExprTree->_node.optr, sizeof(pExprTree->_node.optr));
-    tbufWrite(pBuf, &pExprTree->_node.hasPK, sizeof(pExprTree->_node.hasPK));
-    exprTreeToBinaryImpl(pExprTree->_node.pLeft, pBuf);
-    exprTreeToBinaryImpl(pExprTree->_node.pRight, pBuf);
-  }
-}
+static void exprTreeToBinaryImpl(SBufferWriter* bw, tExprNode* expr) {
+  tbufWriteUint8(bw, expr->nodeType);
+  if (expr->nodeType == TSQL_NODE_VALUE) {
+    tVariant* pVal = expr->pVal;
+    tbufWriteUint32(bw, pVal->nType);
+    if (pVal->nType == TSDB_DATA_TYPE_BINARY) {
+      tbufWriteInt32(bw, pVal->nLen);
+      tbufWrite(bw, pVal->pz, pVal->nLen);
+    } else {
+      tbufWriteInt64(bw, pVal->i64Key);
+    }
+  } else if (expr->nodeType == TSQL_NODE_COL) {
+    SSchema* pSchema = expr->pSchema;
+    tbufWriteInt16(bw, pSchema->colId);
+    tbufWriteInt16(bw, pSchema->bytes);
+    tbufWriteUint8(bw, pSchema->type);
+    tbufWriteString(bw, pSchema->name);
+  } else if (expr->nodeType == TSQL_NODE_EXPR) {
+    tbufWriteUint8(bw, expr->_node.optr);
+    tbufWriteUint8(bw, expr->_node.hasPK);
+    exprTreeToBinaryImpl(bw, expr->_node.pLeft);
+    exprTreeToBinaryImpl(bw, expr->_node.pRight);
+  }
+}

-SBuffer exprTreeToBinary(tExprNode* pExprTree) {
-  SBuffer buf = {0};
-  if (pExprTree == NULL) {
-    return buf;
-  }
-  int32_t code = tbufBeginWrite(&buf);
-  if (code != 0) {
-    return buf;
-  }
-  exprTreeToBinaryImpl(pExprTree, &buf);
-  return buf;
-}
+void exprTreeToBinary(SBufferWriter* bw, tExprNode* expr) {
+  if (expr != NULL) {
+    exprTreeToBinaryImpl(bw, expr);
+  }
+}
+
+// TODO: these three functions should be made global
+static void* exception_calloc(size_t nmemb, size_t size) {
+  void* p = calloc(nmemb, size);
+  if (p == NULL) {
+    THROW(TSDB_CODE_SERV_OUT_OF_MEMORY);
+  }
+  return p;
+}
+
+static void* exception_malloc(size_t size) {
+  void* p = malloc(size);
+  if (p == NULL) {
+    THROW(TSDB_CODE_SERV_OUT_OF_MEMORY);
+  }
+  return p;
+}
+
+static char* exception_strdup(const char* str) {
+  char* p = strdup(str);
+  if (p == NULL) {
+    THROW(TSDB_CODE_SERV_OUT_OF_MEMORY);
+  }
+  return p;
+}

-static tExprNode* exprTreeFromBinaryImpl(SBuffer* pBuf) {
-  tExprNode* pExpr = calloc(1, sizeof(tExprNode));
-  pExpr->nodeType = tbufReadUint8(pBuf);
-  if (pExpr->nodeType == TSQL_NODE_VALUE) {
-    tVariant* pVal = calloc(1, sizeof(tVariant));
-    if (pVal == NULL) {
-      // TODO:
-    }
-    pExpr->pVal = pVal;
-    pVal->nType = tbufReadUint32(pBuf);
-    if (pVal->nType == TSDB_DATA_TYPE_BINARY) {
-      tbufReadToBuffer(pBuf, &pVal->nLen, sizeof(pVal->nLen));
-      pVal->pz = calloc(1, pVal->nLen + 1);
-      tbufReadToBuffer(pBuf, pVal->pz, pVal->nLen);
-    } else {
-      pVal->i64Key = tbufReadInt64(pBuf);
-    }
-  } else if (pExpr->nodeType == TSQL_NODE_COL) {
-    SSchema* pSchema = calloc(1, sizeof(SSchema));
-    if (pSchema == NULL) {
-      // TODO:
-    }
-    pExpr->pSchema = pSchema;
-    pSchema->colId = tbufReadInt16(pBuf);
-    pSchema->bytes = tbufReadInt16(pBuf);
-    pSchema->type = tbufReadUint8(pBuf);
-    tbufReadToString(pBuf, pSchema->name, TSDB_COL_NAME_LEN);
-  } else if (pExpr->nodeType == TSQL_NODE_EXPR) {
-    pExpr->_node.optr = tbufReadUint8(pBuf);
-    pExpr->_node.hasPK = tbufReadUint8(pBuf);
-    pExpr->_node.pLeft = exprTreeFromBinaryImpl(pBuf);
-    pExpr->_node.pRight = exprTreeFromBinaryImpl(pBuf);
-    assert(pExpr->_node.pLeft != NULL && pExpr->_node.pRight != NULL);
-  }
-  return pExpr;
-}
+static tExprNode* exprTreeFromBinaryImpl(SBufferReader* br) {
+  int32_t anchor = CLEANUP_GET_ANCHOR();
+  tExprNode* pExpr = exception_calloc(1, sizeof(tExprNode));
+  CLEANUP_PUSH_VOID_PTR_PTR(true, tExprNodeDestroy, pExpr, NULL);
+  pExpr->nodeType = tbufReadUint8(br);
+  if (pExpr->nodeType == TSQL_NODE_VALUE) {
+    tVariant* pVal = exception_calloc(1, sizeof(tVariant));
+    pExpr->pVal = pVal;
+    pVal->nType = tbufReadUint32(br);
+    if (pVal->nType == TSDB_DATA_TYPE_BINARY) {
+      tbufReadToBuffer(br, &pVal->nLen, sizeof(pVal->nLen));
+      pVal->pz = calloc(1, pVal->nLen + 1);
+      tbufReadToBuffer(br, pVal->pz, pVal->nLen);
+    } else {
+      pVal->i64Key = tbufReadInt64(br);
+    }
+  } else if (pExpr->nodeType == TSQL_NODE_COL) {
+    SSchema* pSchema = exception_calloc(1, sizeof(SSchema));
+    pExpr->pSchema = pSchema;
+    pSchema->colId = tbufReadInt16(br);
+    pSchema->bytes = tbufReadInt16(br);
+    pSchema->type = tbufReadUint8(br);
+    tbufReadToString(br, pSchema->name, TSDB_COL_NAME_LEN);
+  } else if (pExpr->nodeType == TSQL_NODE_EXPR) {
+    pExpr->_node.optr = tbufReadUint8(br);
+    pExpr->_node.hasPK = tbufReadUint8(br);
+    pExpr->_node.pLeft = exprTreeFromBinaryImpl(br);
+    pExpr->_node.pRight = exprTreeFromBinaryImpl(br);
+    assert(pExpr->_node.pLeft != NULL && pExpr->_node.pRight != NULL);
+  }
+  CLEANUP_EXECUTE_TO(anchor, false);
+  return pExpr;
+}

-tExprNode* exprTreeFromBinary(const void* pBuf, size_t size) {
+tExprNode* exprTreeFromBinary(const void* data, size_t size) {
  if (size == 0) {
    return NULL;
  }
-  SBuffer rbuf = {0};
-  tbufBeginRead(&rbuf, pBuf, size);
-  return exprTreeFromBinaryImpl(&rbuf);
+  SBufferReader br = tbufInitReader(data, size, false);
+  return exprTreeFromBinaryImpl(&br);
}

tExprNode* exprTreeFromTableName(const char* tbnameCond) {
@@ -1128,23 +1140,18 @@ tExprNode* exprTreeFromTableName(const char* tbnameCond) {
    return NULL;
  }

-  tExprNode* expr = calloc(1, sizeof(tExprNode));
-  if (expr == NULL) {
-    // TODO:
-  }
+  int32_t anchor = CLEANUP_GET_ANCHOR();
+
+  tExprNode* expr = exception_calloc(1, sizeof(tExprNode));
+  CLEANUP_PUSH_VOID_PTR_PTR(true, tExprNodeDestroy, expr, NULL);
  expr->nodeType = TSQL_NODE_EXPR;

-  tExprNode* left = calloc(1, sizeof(tExprNode));
-  if (left == NULL) {
-    // TODO:
-  }
+  tExprNode* left = exception_calloc(1, sizeof(tExprNode));
  expr->_node.pLeft = left;
  left->nodeType = TSQL_NODE_COL;

-  SSchema* pSchema = calloc(1, sizeof(SSchema));
-  if (pSchema == NULL) {
-    // TODO:
-  }
+  SSchema* pSchema = exception_calloc(1, sizeof(SSchema));
  left->pSchema = pSchema;
  pSchema->type = TSDB_DATA_TYPE_BINARY;
@@ -1152,36 +1159,24 @@ tExprNode* exprTreeFromTableName(const char* tbnameCond) {
  strcpy(pSchema->name, TSQL_TBNAME_L);
  pSchema->colId = -1;

-  tExprNode* right = calloc(1, sizeof(tExprNode));
-  if (right == NULL) {
-    // TODO
-  }
+  tExprNode* right = exception_calloc(1, sizeof(tExprNode));
  expr->_node.pRight = right;

  if (strncmp(tbnameCond, QUERY_COND_REL_PREFIX_LIKE, QUERY_COND_REL_PREFIX_LIKE_LEN) == 0) {
    right->nodeType = TSQL_NODE_VALUE;
    expr->_node.optr = TSDB_RELATION_LIKE;
-   tVariant* pVal = calloc(1, sizeof(tVariant));
-   if (pVal == NULL) {
-     // TODO:
-   }
+   tVariant* pVal = exception_calloc(1, sizeof(tVariant));
    right->pVal = pVal;
-   pVal->nType = TSDB_DATA_TYPE_BINARY;
    size_t len = strlen(tbnameCond + QUERY_COND_REL_PREFIX_LIKE_LEN) + 1;
-   pVal->pz = malloc(len);
-   if (pVal->pz == NULL) {
-     // TODO:
-   }
+   pVal->pz = exception_malloc(len);
    memcpy(pVal->pz, tbnameCond + QUERY_COND_REL_PREFIX_LIKE_LEN, len);
+   pVal->nType = TSDB_DATA_TYPE_BINARY;
    pVal->nLen = (int32_t)len;
  } else if (strncmp(tbnameCond, QUERY_COND_REL_PREFIX_IN, QUERY_COND_REL_PREFIX_IN_LEN) == 0) {
    right->nodeType = TSQL_NODE_VALUE;
    expr->_node.optr = TSDB_RELATION_IN;
-   tVariant* pVal = calloc(1, sizeof(tVariant));
-   if (pVal == NULL) {
-     // TODO:
-   }
+   tVariant* pVal = exception_calloc(1, sizeof(tVariant));
    right->pVal = pVal;
    pVal->nType = TSDB_DATA_TYPE_ARRAY;
    pVal->arr = taosArrayInit(2, sizeof(char*));
@@ -1192,7 +1187,7 @@ tExprNode* exprTreeFromTableName(const char* tbnameCond) {
      cond = e + 1;
    } else if (*e == ',') {
      size_t len = e - cond + 1;
-     char* p = malloc( len );
+     char* p = exception_malloc( len );
      memcpy(p, cond, len);
      p[len - 1] = 0;
      cond += len;
@@ -1201,12 +1196,13 @@ tExprNode* exprTreeFromTableName(const char* tbnameCond) {
  }

  if (*cond != 0) {
-   char* p = strdup( cond );
+   char* p = exception_strdup( cond );
    taosArrayPush(pVal->arr, &p);
  }

  taosArraySortString(pVal->arr);
}

+ CLEANUP_EXECUTE_TO(anchor, false);
  return expr;
}
\ No newline at end of file
@@ -550,11 +550,12 @@ tExprNode* createExpr2() {
void exprSerializeTest1() {
  tExprNode* p1 = createExpr1();
-  SBuffer buf = exprTreeToBinary(p1);
+  SBufferWriter bw = tbufInitWriter(NULL, false);
+  exprTreeToBinary(&bw, p1);

-  size_t size = tbufTell(&buf);
+  size_t size = tbufTell(&bw);
  ASSERT_TRUE(size > 0);
-  char* b = tbufGetData(&buf, false);
+  char* b = tbufGetData(&bw, false);

  tExprNode* p2 = exprTreeFromBinary(b, size);
  ASSERT_EQ(p1->nodeType, p2->nodeType);
@@ -581,16 +582,17 @@ void exprSerializeTest1() {
  tExprTreeDestroy(&p1, nullptr);
  tExprTreeDestroy(&p2, nullptr);

-  tbufClose(&buf, false);
+  tbufClose(&bw);
}

void exprSerializeTest2() {
  tExprNode* p1 = createExpr2();
-  SBuffer buf = exprTreeToBinary(p1);
+  SBufferWriter bw = tbufInitWriter(NULL, false);
+  exprTreeToBinary(&bw, p1);

-  size_t size = tbufTell(&buf);
+  size_t size = tbufTell(&bw);
  ASSERT_TRUE(size > 0);
-  char* b = tbufGetData(&buf, false);
+  char* b = tbufGetData(&bw, false);

  tExprNode* p2 = exprTreeFromBinary(b, size);
  ASSERT_EQ(p1->nodeType, p2->nodeType);
@@ -625,7 +627,7 @@ void exprSerializeTest2() {
  tExprTreeDestroy(&p1, nullptr);
  tExprTreeDestroy(&p2, nullptr);

-  tbufClose(&buf, false);
+  tbufClose(&bw);
}
} // namespace

TEST(testCase, astTest) {
......
@@ -18,6 +18,7 @@
#include "talgo.h"
#include "tutil.h"
#include "tcompare.h"
+#include "exception.h"
#include "../../../query/inc/qast.h"       // todo move to common module
#include "../../../query/inc/tlosertree.h"  // todo move to util module
@@ -1473,21 +1474,35 @@ int32_t tsdbQueryByTagsCond(
  }

  int32_t ret = TSDB_CODE_SUCCESS;
-  tExprNode* expr = exprTreeFromTableName(tbnameCond);
-  tExprNode* tagExpr = exprTreeFromBinary(pTagCond, len);
-  if (tagExpr != NULL) {
-    if (expr == NULL) {
-      expr = tagExpr;
-    } else {
-      tExprNode* tbnameExpr = expr;
-      expr = calloc(1, sizeof(tExprNode));
-      expr->nodeType = TSQL_NODE_EXPR;
-      expr->_node.optr = tagNameRelType;
-      expr->_node.pLeft = tagExpr;
-      expr->_node.pRight = tbnameExpr;
-    }
-  }
+  tExprNode* expr = NULL;
+  TRY(32) {
+    expr = exprTreeFromTableName(tbnameCond);
+    if (expr == NULL) {
+      expr = exprTreeFromBinary(pTagCond, len);
+    } else {
+      CLEANUP_PUSH_VOID_PTR_PTR(true, tExprNodeDestroy, expr, NULL);
+      tExprNode* tagExpr = exprTreeFromBinary(pTagCond, len);
+      if (tagExpr != NULL) {
+        CLEANUP_PUSH_VOID_PTR_PTR(true, tExprNodeDestroy, tagExpr, NULL);
+        tExprNode* tbnameExpr = expr;
+        expr = calloc(1, sizeof(tExprNode));
+        if (expr == NULL) {
+          THROW( TSDB_CODE_SERV_OUT_OF_MEMORY );
+        }
+        expr->nodeType = TSQL_NODE_EXPR;
+        expr->_node.optr = tagNameRelType;
+        expr->_node.pLeft = tagExpr;
+        expr->_node.pRight = tbnameExpr;
+      }
+    }
+    CLEANUP_EXECUTE();
+  } CATCH( code ) {
+    CLEANUP_EXECUTE();
+    ret = code;
+    // TODO: more error handling
+  } END_TRY

  doQueryTableList(pSTable, res, expr);
  pGroupInfo->numOfTables = taosArrayGetSize(res);
......
/*
* Copyright (c) 2020 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TDENGINE_EXCEPTION_H
#define TDENGINE_EXCEPTION_H
#include <setjmp.h>
#include <stdint.h>
#include <stdbool.h>
#include <assert.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* cleanup actions
*/
typedef struct SCleanupAction {
bool failOnly;
uint8_t wrapper;
uint16_t reserved;
void* func;
union {
void* Ptr;
bool Bool;
char Char;
int8_t Int8;
uint8_t Uint8;
int16_t Int16;
uint16_t Uint16;
int Int;
unsigned int Uint;
int32_t Int32;
uint32_t Uint32;
int64_t Int64;
uint64_t Uint64;
float Float;
double Double;
} arg1, arg2;
} SCleanupAction;
/*
* exception handler registration
*/
typedef struct SExceptionNode {
struct SExceptionNode* prev;
jmp_buf jb;
int32_t code;
int32_t maxCleanupAction;
int32_t numCleanupAction;
SCleanupAction* cleanupActions;
} SExceptionNode;
////////////////////////////////////////////////////////////////////////////////
// functions & macros for auto-cleanup
void cleanupPush_void_ptr_ptr ( bool failOnly, void* func, void* arg1, void* arg2 );
void cleanupPush_void_ptr_bool ( bool failOnly, void* func, void* arg1, bool arg2 );
void cleanupPush_void_ptr ( bool failOnly, void* func, void* arg );
void cleanupPush_int_int ( bool failOnly, void* func, int arg );
void cleanupPush_void ( bool failOnly, void* func );
int32_t cleanupGetActionCount();
void cleanupExecuteTo( int32_t anchor, bool failed );
void cleanupExecute( SExceptionNode* node, bool failed );
#define CLEANUP_PUSH_VOID_PTR_PTR( failOnly, func, arg1, arg2 ) cleanupPush_void_ptr_ptr( (failOnly), (void*)(func), (void*)(arg1), (void*)(arg2) )
#define CLEANUP_PUSH_VOID_PTR_BOOL( failOnly, func, arg1, arg2 ) cleanupPush_void_ptr_bool( (failOnly), (void*)(func), (void*)(arg1), (bool)(arg2) )
#define CLEANUP_PUSH_VOID_PTR( failOnly, func, arg ) cleanupPush_void_ptr( (failOnly), (void*)(func), (void*)(arg) )
#define CLEANUP_PUSH_INT_INT( failOnly, func, arg ) cleanupPush_void_ptr( (failOnly), (void*)(func), (int)(arg) )
#define CLEANUP_PUSH_VOID( failOnly, func ) cleanupPush_void( (failOnly), (void*)(func) )
#define CLEANUP_PUSH_FREE( failOnly, arg ) cleanupPush_void_ptr( (failOnly), free, (void*)(arg) )
#define CLEANUP_PUSH_CLOSE( failOnly, arg ) cleanupPush_int_int( (failOnly), close, (int)(arg) )
#define CLEANUP_GET_ANCHOR() cleanupGetActionCount()
#define CLEANUP_EXECUTE_TO( anchor, failed ) cleanupExecuteTo( (anchor), (failed) )
////////////////////////////////////////////////////////////////////////////////
// functions & macros for exception handling
void exceptionPushNode( SExceptionNode* node );
int32_t exceptionPopNode();
void exceptionThrow( int code );
#define TRY(maxCleanupActions) do { \
SExceptionNode exceptionNode = { 0 }; \
SCleanupAction cleanupActions[(maxCleanupActions) > 0 ? (maxCleanupActions) : 1]; \
exceptionNode.maxCleanupAction = (maxCleanupActions) > 0 ? (maxCleanupActions) : 1; \
exceptionNode.cleanupActions = cleanupActions; \
exceptionPushNode( &exceptionNode ); \
int caughtException = setjmp( exceptionNode.jb ); \
if( caughtException == 0 )
#define CATCH( code ) int code = exceptionPopNode(); \
if( caughtException == 1 )
#define FINALLY( code ) int code = exceptionPopNode();
#define END_TRY } while( 0 );
#define THROW( x ) exceptionThrow( (x) )
#define CAUGHT_EXCEPTION() ((bool)(caughtException == 1))
#define CLEANUP_EXECUTE() cleanupExecute( &exceptionNode, CAUGHT_EXCEPTION() )
#ifdef __cplusplus
}
#endif
#endif
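
The exception.h header above is what the rest of this commit builds on (tscSQLParser.c, qast.c, tsdbRead.c). The following is a minimal usage sketch, not part of the commit: the 256-byte buffer and the exampleSketch name are arbitrary, and the error code is taken from taoserror.h the same way other files in this commit do.

// Sketch only (not from the commit): how TRY/CATCH/THROW and the cleanup
// macros compose. TSDB_CODE_SERV_OUT_OF_MEMORY comes from taoserror.h.
#include <stdlib.h>
#include <taoserror.h>
#include "exception.h"

int32_t exampleSketch() {
  int32_t ret = 0;
  TRY( 2 ) {                                  // room for up to 2 cleanup actions
    char* tmp = malloc( 256 );
    if( tmp == NULL ) {
      THROW( TSDB_CODE_SERV_OUT_OF_MEMORY );  // longjmps to the CATCH branch below
    }
    CLEANUP_PUSH_FREE( false, tmp );          // failOnly = false: freed on success and failure
    // ... work with tmp, possibly calling functions that THROW ...
    CLEANUP_EXECUTE();                        // run the registered cleanups before leaving TRY
  } CATCH( code ) {
    CLEANUP_EXECUTE();                        // release resources on the failure path as well
    ret = code;
  } END_TRY
  return ret;
}

This mirrors the pattern used in tsdbQueryByTagsCond above: cleanups are pushed inside the TRY block, and CLEANUP_EXECUTE is called on both the success path and in CATCH.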
@@ -16,122 +16,163 @@
#ifndef TDENGINE_TBUFFER_H
#define TDENGINE_TBUFFER_H

-#include "setjmp.h"
-#include "os.h"
+#include <stdint.h>
+#include <stdbool.h>

#ifdef __cplusplus
extern "C" {
#endif

-/*
-SBuffer can be used to read or write a buffer, but cannot be used for both
-read & write at a same time. Below is an example:
-
-int main(int argc, char** argv) {
-  //--------------------- write ------------------------
-  SBuffer wbuf;
-  int32_t code = tbufBeginWrite(&wbuf);
-  if (code != 0) {
-    // handle errors
-    return 0;
-  }
-
-  // reserve 1024 bytes for the buffer to improve performance
-  tbufEnsureCapacity(&wbuf, 1024);
-
-  // write 5 integers to the buffer
-  for (int i = 0; i < 5; i++) {
-    tbufWriteInt32(&wbuf, i);
-  }
-
-  // write a string to the buffer
-  tbufWriteString(&wbuf, "this is a string.\n");
-
-  // acquire the result and close the write buffer
-  size_t size = tbufTell(&wbuf);
-  char* data = tbufGetData(&wbuf, true);
-  tbufClose(&wbuf, true);
-
-  //------------------------ read -----------------------
-  SBuffer rbuf;
-  code = tbufBeginRead(&rbuf, data, size);
-  if (code != 0) {
-    printf("you will see this message after print out 5 integers and a string.\n");
-    tbufClose(&rbuf, false);
-    return 0;
-  }
-
-  // read & print out 5 integers
-  for (int i = 0; i < 5; i++) {
-    printf("%d\n", tbufReadInt32(&rbuf));
-  }
-
-  // read & print out a string
-  printf(tbufReadString(&rbuf, NULL));
-
-  // try read another integer, this result in an error as there no this integer
-  tbufReadInt32(&rbuf);
-  printf("you should not see this message.\n");
-
-  tbufClose(&rbuf, false);
-  return 0;
-}
-*/
-
-typedef struct {
-  jmp_buf jb;
-  char* data;
-  size_t pos;
-  size_t size;
-} SBuffer;
-
-// common functions can be used in both read & write
-#define tbufThrowError(buf, code) longjmp((buf)->jb, (code))
-size_t tbufTell(SBuffer* buf);
-size_t tbufSeekTo(SBuffer* buf, size_t pos);
-size_t tbufSkip(SBuffer* buf, size_t size);
-void tbufClose(SBuffer* buf, bool keepData);
-
-// basic read functions
-#define tbufBeginRead(buf, _data, len) ((buf)->data = (char*)(_data), ((buf)->pos = 0), ((buf)->size = ((_data) == NULL) ? 0 : (len)), setjmp((buf)->jb))
-char* tbufRead(SBuffer* buf, size_t size);
-void tbufReadToBuffer(SBuffer* buf, void* dst, size_t size);
-const char* tbufReadString(SBuffer* buf, size_t* len);
-size_t tbufReadToString(SBuffer* buf, char* dst, size_t size);
-
-// basic write functions
-#define tbufBeginWrite(buf) ((buf)->data = NULL, ((buf)->pos = 0), ((buf)->size = 0), setjmp((buf)->jb))
-void tbufEnsureCapacity(SBuffer* buf, size_t size);
-char* tbufGetData(SBuffer* buf, bool takeOver);
-void tbufWrite(SBuffer* buf, const void* data, size_t size);
-void tbufWriteAt(SBuffer* buf, size_t pos, const void* data, size_t size);
-void tbufWriteStringLen(SBuffer* buf, const char* str, size_t len);
-void tbufWriteString(SBuffer* buf, const char* str);
-
-// read & write function for primitive types
-#ifndef TBUFFER_DEFINE_FUNCTION
-#define TBUFFER_DEFINE_FUNCTION(type, name) \
-  type tbufRead##name(SBuffer* buf); \
-  void tbufWrite##name(SBuffer* buf, type data); \
-  void tbufWrite##name##At(SBuffer* buf, size_t pos, type data);
-#endif
-
-TBUFFER_DEFINE_FUNCTION(bool, Bool)
-TBUFFER_DEFINE_FUNCTION(char, Char)
-TBUFFER_DEFINE_FUNCTION(int8_t, Int8)
-TBUFFER_DEFINE_FUNCTION(uint8_t, Uint8)
-TBUFFER_DEFINE_FUNCTION(int16_t, Int16)
-TBUFFER_DEFINE_FUNCTION(uint16_t, Uint16)
-TBUFFER_DEFINE_FUNCTION(int32_t, Int32)
-TBUFFER_DEFINE_FUNCTION(uint32_t, Uint32)
-TBUFFER_DEFINE_FUNCTION(int64_t, Int64)
-TBUFFER_DEFINE_FUNCTION(uint64_t, Uint64)
-TBUFFER_DEFINE_FUNCTION(float, Float)
-TBUFFER_DEFINE_FUNCTION(double, Double)
+////////////////////////////////////////////////////////////////////////////////
+// usage example
+/*
+#include <stdio.h>
+#include "exception.h"
+
+int main( int argc, char** argv ) {
+  SBufferWriter bw = tbufInitWriter( NULL, false );
+
+  TRY( 1 ) {
+    //--------------------- write ------------------------
+    // reserve 1024 bytes for the buffer to improve performance
+    tbufEnsureCapacity( &bw, 1024 );
+
+    // reserve space for the interger count
+    size_t pos = tbufReserve( &bw, sizeof(int32_t) );
+    // write 5 integers to the buffer
+    for( int i = 0; i < 5; i++) {
+      tbufWriteInt32( &bw, i );
+    }
+    // write the integer count to buffer at reserved position
+    tbufWriteInt32At( &bw, pos, 5 );
+
+    // write a string to the buffer
+    tbufWriteString( &bw, "this is a string.\n" );
+    // acquire the result and close the write buffer
+    size_t size = tbufTell( &bw );
+    char* data = tbufGetData( &bw, false );
+
+    //------------------------ read -----------------------
+    SBufferReader br = tbufInitReader( data, size, false );
+    // read & print out all integers
+    int32_t count = tbufReadInt32( &br );
+    for( int i = 0; i < count; i++ ) {
+      printf( "%d\n", tbufReadInt32(&br) );
+    }
+    // read & print out a string
+    puts( tbufReadString(&br, NULL) );
+    // try read another integer, this result in an error as there no this integer
+    tbufReadInt32( &br );
+    printf( "you should not see this message.\n" );
+  } CATCH( code ) {
+    printf( "exception code is: %d, you will see this message after print out 5 integers and a string.\n", code );
+  } END_TRY
+
+  tbufCloseWriter( &bw );
+  return 0;
+}
+*/
+
+typedef struct {
+  bool endian;
+  const char* data;
+  size_t pos;
+  size_t size;
+} SBufferReader;
+
+typedef struct {
+  bool endian;
+  char* data;
+  size_t pos;
+  size_t size;
+  void* (*allocator)( void*, size_t );
+} SBufferWriter;
+
+////////////////////////////////////////////////////////////////////////////////
+// common functions & macros for both reader & writer
+
+#define tbufTell( buf ) ((buf)->pos)
+
+////////////////////////////////////////////////////////////////////////////////
+// reader functions & macros
+
+// *Endian*, if true, reader functions of primitive types will do 'ntoh' automatically
+#define tbufInitReader( Data, Size, Endian ) {.endian = (Endian), .data = (Data), .pos = 0, .size = ((Data) == NULL ? 0 :(Size))}
+
+size_t tbufSkip( SBufferReader* buf, size_t size );
+const char* tbufRead( SBufferReader* buf, size_t size );
+void tbufReadToBuffer( SBufferReader* buf, void* dst, size_t size );
+const char* tbufReadString( SBufferReader* buf, size_t* len );
+size_t tbufReadToString( SBufferReader* buf, char* dst, size_t size );
+const char* tbufReadBinary( SBufferReader* buf, size_t *len );
+size_t tbufReadToBinary( SBufferReader* buf, void* dst, size_t size );
+bool tbufReadBool( SBufferReader* buf );
+char tbufReadChar( SBufferReader* buf );
+int8_t tbufReadInt8( SBufferReader* buf );
+uint8_t tbufReadUint8( SBufferReader* buf );
+int16_t tbufReadInt16( SBufferReader* buf );
+uint16_t tbufReadUint16( SBufferReader* buf );
+int32_t tbufReadInt32( SBufferReader* buf );
+uint32_t tbufReadUint32( SBufferReader* buf );
+int64_t tbufReadInt64( SBufferReader* buf );
+uint64_t tbufReadUint64( SBufferReader* buf );
+float tbufReadFloat( SBufferReader* buf );
+double tbufReadDouble( SBufferReader* buf );
+
+////////////////////////////////////////////////////////////////////////////////
+// writer functions & macros
+
+// *Allocator*, function to allocate memory, will use 'realloc' if NULL
+// *Endian*, if true, writer functions of primitive types will do 'hton' automatically
+#define tbufInitWriter( Allocator, Endian ) {.endian = (Endian), .data = NULL, .pos = 0, .size = 0, .allocator = ((Allocator) == NULL ? realloc : (Allocator))}
+
+void tbufCloseWriter( SBufferWriter* buf );
+void tbufEnsureCapacity( SBufferWriter* buf, size_t size );
+size_t tbufReserve( SBufferWriter* buf, size_t size );
+char* tbufGetData( SBufferWriter* buf, bool takeOver );
+void tbufWrite( SBufferWriter* buf, const void* data, size_t size );
+void tbufWriteAt( SBufferWriter* buf, size_t pos, const void* data, size_t size );
+void tbufWriteStringLen( SBufferWriter* buf, const char* str, size_t len );
+void tbufWriteString( SBufferWriter* buf, const char* str );
+// the prototype of tbufWriteBinary and tbufWrite are identical
+// the difference is: tbufWriteBinary writes the length of the data to the buffer
+// first, then the actual data, which means the reader don't need to know data
+// size before read. Write only write the data itself, which means the reader
+// need to know data size before read.
+void tbufWriteBinary( SBufferWriter* buf, const void* data, size_t len );
+void tbufWriteBool( SBufferWriter* buf, bool data );
+void tbufWriteBoolAt( SBufferWriter* buf, size_t pos, bool data );
+void tbufWriteChar( SBufferWriter* buf, char data );
+void tbufWriteCharAt( SBufferWriter* buf, size_t pos, char data );
+void tbufWriteInt8( SBufferWriter* buf, int8_t data );
+void tbufWriteInt8At( SBufferWriter* buf, size_t pos, int8_t data );
+void tbufWriteUint8( SBufferWriter* buf, uint8_t data );
+void tbufWriteUint8At( SBufferWriter* buf, size_t pos, uint8_t data );
+void tbufWriteInt16( SBufferWriter* buf, int16_t data );
+void tbufWriteInt16At( SBufferWriter* buf, size_t pos, int16_t data );
+void tbufWriteUint16( SBufferWriter* buf, uint16_t data );
+void tbufWriteUint16At( SBufferWriter* buf, size_t pos, uint16_t data );
+void tbufWriteInt32( SBufferWriter* buf, int32_t data );
+void tbufWriteInt32At( SBufferWriter* buf, size_t pos, int32_t data );
+void tbufWriteUint32( SBufferWriter* buf, uint32_t data );
+void tbufWriteUint32At( SBufferWriter* buf, size_t pos, uint32_t data );
+void tbufWriteInt64( SBufferWriter* buf, int64_t data );
+void tbufWriteInt64At( SBufferWriter* buf, size_t pos, int64_t data );
+void tbufWriteUint64( SBufferWriter* buf, uint64_t data );
+void tbufWriteUint64At( SBufferWriter* buf, size_t pos, uint64_t data );
+void tbufWriteFloat( SBufferWriter* buf, float data );
+void tbufWriteFloatAt( SBufferWriter* buf, size_t pos, float data );
+void tbufWriteDouble( SBufferWriter* buf, double data );
+void tbufWriteDoubleAt( SBufferWriter* buf, size_t pos, double data );

#ifdef __cplusplus
}
#endif

#endif
\ No newline at end of file
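
The comment above distinguishes tbufWrite from tbufWriteBinary: the latter stores the length before the payload, so the reader can recover it without out-of-band knowledge. Below is a small sketch of that round trip; it is not part of the commit, and the function name binaryRoundTripSketch and the payload bytes are arbitrary.

// Sketch only (not from the commit): length-prefixed round trip with the API above.
#include <assert.h>
#include <string.h>
#include "tbuffer.h"
#include "exception.h"

void binaryRoundTripSketch() {
  SBufferWriter bw = tbufInitWriter( NULL, false );
  TRY( 1 ) {
    const char payload[] = { 0x01, 0x02, 0x03, 0x04 };
    tbufWriteBinary( &bw, payload, sizeof(payload) );  // length first, then the bytes

    SBufferReader br = tbufInitReader( tbufGetData(&bw, false), tbufTell(&bw), false );
    size_t len = 0;
    const char* data = tbufReadBinary( &br, &len );    // length recovered from the buffer
    assert( len == sizeof(payload) && memcmp(data, payload, len) == 0 );
  } CATCH( code ) {
    (void)code;  // out-of-memory or corrupted-buffer errors land here
  } END_TRY
  tbufCloseWriter( &bw );
}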
#include "exception.h"
static _Thread_local SExceptionNode* expList;
void exceptionPushNode( SExceptionNode* node ) {
node->prev = expList;
expList = node;
}
int32_t exceptionPopNode() {
SExceptionNode* node = expList;
expList = node->prev;
return node->code;
}
void exceptionThrow( int code ) {
expList->code = code;
longjmp( expList->jb, 1 );
}
static void cleanupWrapper_void_ptr_ptr( SCleanupAction* ca ) {
void (*func)( void*, void* ) = ca->func;
func( ca->arg1.Ptr, ca->arg2.Ptr );
}
static void cleanupWrapper_void_ptr_bool( SCleanupAction* ca ) {
void (*func)( void*, bool ) = ca->func;
func( ca->arg1.Ptr, ca->arg2.Bool );
}
static void cleanupWrapper_void_ptr( SCleanupAction* ca ) {
void (*func)( void* ) = ca->func;
func( ca->arg1.Ptr );
}
static void cleanupWrapper_int_int( SCleanupAction* ca ) {
int (*func)( int ) = ca->func;
func( (int)(intptr_t)(ca->arg1.Int) );
}
static void cleanupWrapper_void_void( SCleanupAction* ca ) {
void (*func)() = ca->func;
func();
}
typedef void (*wrapper)(SCleanupAction*);
static wrapper wrappers[] = {
cleanupWrapper_void_ptr_ptr,
cleanupWrapper_void_ptr_bool,
cleanupWrapper_void_ptr,
cleanupWrapper_int_int,
cleanupWrapper_void_void,
};
void cleanupPush_void_ptr_ptr( bool failOnly, void* func, void* arg1, void* arg2 ) {
assert( expList->numCleanupAction < expList->maxCleanupAction );
SCleanupAction *ca = expList->cleanupActions + expList->numCleanupAction++;
ca->wrapper = 0;
ca->failOnly = failOnly;
ca->func = func;
ca->arg1.Ptr = arg1;
ca->arg2.Ptr = arg2;
}
void cleanupPush_void_ptr_bool( bool failOnly, void* func, void* arg1, bool arg2 ) {
assert( expList->numCleanupAction < expList->maxCleanupAction );
SCleanupAction *ca = expList->cleanupActions + expList->numCleanupAction++;
ca->wrapper = 1;
ca->failOnly = failOnly;
ca->func = func;
ca->arg1.Ptr = arg1;
ca->arg2.Bool = arg2;
}
void cleanupPush_void_ptr( bool failOnly, void* func, void* arg ) {
assert( expList->numCleanupAction < expList->maxCleanupAction );
SCleanupAction *ca = expList->cleanupActions + expList->numCleanupAction++;
ca->wrapper = 2;
ca->failOnly = failOnly;
ca->func = func;
ca->arg1.Ptr = arg;
}
void cleanupPush_int_int( bool failOnly, void* func, int arg ) {
assert( expList->numCleanupAction < expList->maxCleanupAction );
SCleanupAction *ca = expList->cleanupActions + expList->numCleanupAction++;
ca->wrapper = 3;
ca->failOnly = failOnly;
ca->func = func;
ca->arg1.Int = arg;
}
void cleanupPush_void( bool failOnly, void* func ) {
assert( expList->numCleanupAction < expList->maxCleanupAction );
SCleanupAction *ca = expList->cleanupActions + expList->numCleanupAction++;
ca->wrapper = 4;
ca->failOnly = failOnly;
ca->func = func;
}
int32_t cleanupGetActionCount() {
return expList->numCleanupAction;
}
static void doExecuteCleanup( SExceptionNode* node, int32_t anchor, bool failed ) {
while( node->numCleanupAction > anchor ) {
--node->numCleanupAction;
SCleanupAction *ca = node->cleanupActions + node->numCleanupAction;
if( failed || !(ca->failOnly) )
wrappers[ca->wrapper]( ca );
}
}
void cleanupExecuteTo( int32_t anchor, bool failed ) {
doExecuteCleanup( expList, anchor, failed );
}
void cleanupExecute( SExceptionNode* node, bool failed ) {
doExecuteCleanup( node, 0, failed );
}
\ No newline at end of file
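
One detail of the implementation above that is easy to miss: doExecuteCleanup runs actions in reverse push order, and an action with failOnly set is skipped on the success path. A short sketch, not part of the commit (logDone, cleanupSketch and the error code 1 are made up for illustration):

// Sketch only (not from the commit): failOnly semantics and execution order.
#include <stdio.h>
#include <stdlib.h>
#include "exception.h"

static void logDone() { printf( "done\n" ); }

void cleanupSketch( bool fail ) {
  TRY( 2 ) {
    void* tmp = malloc( 64 );
    CLEANUP_PUSH_FREE( true, tmp );       // failOnly = true: freed only if an exception occurs
    CLEANUP_PUSH_VOID( false, logDone );  // failOnly = false: always executed
    if( fail ) {
      THROW( 1 );                         // hypothetical error code
    }
    free( tmp );                          // success path releases the buffer itself
    CLEANUP_EXECUTE();                    // runs logDone only; the free action is skipped
  } CATCH( code ) {
    CLEANUP_EXECUTE();                    // runs logDone, then free(tmp), in reverse push order
    (void)code;
  } END_TRY
}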
@@ -16,150 +16,384 @@
#include <stdlib.h>
#include <memory.h>
#include <assert.h>
+#include <arpa/inet.h>
+#include "tbuffer.h"
+#include "exception.h"
+#include <taoserror.h>

-#define TBUFFER_DEFINE_FUNCTION(type, name) \
-  type tbufRead##name(SBuffer* buf) { \
-    type ret; \
-    tbufReadToBuffer(buf, &ret, sizeof(type)); \
-    return ret; \
-  }\
-  void tbufWrite##name(SBuffer* buf, type data) {\
-    tbufWrite(buf, &data, sizeof(data));\
-  }\
-  void tbufWrite##name##At(SBuffer* buf, size_t pos, type data) {\
-    tbufWriteAt(buf, pos, &data, sizeof(data));\
-  }
-
-#include "tbuffer.h"
-
-////////////////////////////////////////////////////////////////////////////////
-// common functions
-
-size_t tbufTell(SBuffer* buf) {
-  return buf->pos;
-}
-
-size_t tbufSeekTo(SBuffer* buf, size_t pos) {
-  if (pos > buf->size) {
-    // TODO: update error code, other tbufThrowError need to be changed too
-    tbufThrowError(buf, 1);
-  }
-  size_t old = buf->pos;
-  buf->pos = pos;
-  return old;
-}
-
-size_t tbufSkip(SBuffer* buf, size_t size) {
-  return tbufSeekTo(buf, buf->pos + size);
-}
-
-void tbufClose(SBuffer* buf, bool keepData) {
-  if (!keepData) {
-    free(buf->data);
-  }
-  buf->data = NULL;
-  buf->pos = 0;
-  buf->size = 0;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// read functions
-
-char* tbufRead(SBuffer* buf, size_t size) {
-  char* ret = buf->data + buf->pos;
-  tbufSkip(buf, size);
-  return ret;
-}
-
-void tbufReadToBuffer(SBuffer* buf, void* dst, size_t size) {
-  assert(dst != NULL);
-  // always using memcpy, leave optimization to compiler
-  memcpy(dst, tbufRead(buf, size), size);
-}
-
-const char* tbufReadString(SBuffer* buf, size_t* len) {
-  uint16_t l = tbufReadUint16(buf);
-  char* ret = buf->data + buf->pos;
-  tbufSkip(buf, l + 1);
-  ret[l] = 0;  // ensure the string end with '\0'
-  if (len != NULL) {
-    *len = l;
-  }
-  return ret;
-}
-
-size_t tbufReadToString(SBuffer* buf, char* dst, size_t size) {
-  assert(dst != NULL);
-  size_t len;
-  const char* str = tbufReadString(buf, &len);
-  if (len >= size) {
-    len = size - 1;
-  }
-  memcpy(dst, str, len);
-  dst[len] = 0;
-  return len;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// write functions
-
-void tbufEnsureCapacity(SBuffer* buf, size_t size) {
-  size += buf->pos;
-  if (size > buf->size) {
-    size_t nsize = size + buf->size;
-    char* data = realloc(buf->data, nsize);
-    if (data == NULL) {
-      tbufThrowError(buf, 2);
-    }
-    buf->data = data;
-    buf->size = nsize;
-  }
-}
-
-char* tbufGetData(SBuffer* buf, bool takeOver) {
-  char* ret = buf->data;
-  if (takeOver) {
-    buf->pos = 0;
-    buf->size = 0;
-    buf->data = NULL;
-  }
-  return ret;
-}
-
-void tbufEndWrite(SBuffer* buf) {
-  free(buf->data);
-  buf->data = NULL;
-  buf->pos = 0;
-  buf->size = 0;
-}
-
-void tbufWrite(SBuffer* buf, const void* data, size_t size) {
-  assert(data != NULL);
-  tbufEnsureCapacity(buf, size);
-  memcpy(buf->data + buf->pos, data, size);
-  buf->pos += size;
-}
-
-void tbufWriteAt(SBuffer* buf, size_t pos, const void* data, size_t size) {
-  assert(data != NULL);
-  // this function can only be called to fill the gap on previous writes,
-  // so 'pos + size <= buf->pos' must be true
-  assert(pos + size <= buf->pos);
-  memcpy(buf->data + pos, data, size);
-}
-
-void tbufWriteStringLen(SBuffer* buf, const char* str, size_t len) {
-  // maximum string length is 65535, if longer string is required
-  // this function and the corresponding read function need to be
-  // revised.
-  assert(len <= 0xffff);
-  tbufWriteUint16(buf, (uint16_t)len);
-  tbufWrite(buf, str, len + 1);
-}
-
-void tbufWriteString(SBuffer* buf, const char* str) {
-  tbufWriteStringLen(buf, str, strlen(str));
-}
+////////////////////////////////////////////////////////////////////////////////
+// reader functions
+
+size_t tbufSkip(SBufferReader* buf, size_t size) {
+  if( (buf->pos + size) > buf->size ) {
+    THROW( TSDB_CODE_MEMORY_CORRUPTED );
+  }
+  size_t old = buf->pos;
+  buf->pos += size;
+  return old;
+}
+
+const char* tbufRead( SBufferReader* buf, size_t size ) {
+  const char* ret = buf->data + buf->pos;
+  tbufSkip( buf, size );
+  return ret;
+}
+
+void tbufReadToBuffer( SBufferReader* buf, void* dst, size_t size ) {
+  assert( dst != NULL );
+  // always using memcpy, leave optimization to compiler
+  memcpy( dst, tbufRead(buf, size), size );
+}
+
+static size_t tbufReadLength( SBufferReader* buf ) {
+  // maximum length is 65535, if larger length is required
+  // this function and the corresponding write function need to be
+  // revised.
+  uint16_t l = tbufReadUint16( buf );
+  return l;
+}
+
+const char* tbufReadString( SBufferReader* buf, size_t* len ) {
+  size_t l = tbufReadLength( buf );
+  const char* ret = buf->data + buf->pos;
+  tbufSkip( buf, l + 1 );
+  if( ret[l] != 0 ) {
+    THROW( TSDB_CODE_MEMORY_CORRUPTED );
+  }
+  if( len != NULL ) {
+    *len = l;
+  }
+  return ret;
+}
+
+size_t tbufReadToString( SBufferReader* buf, char* dst, size_t size ) {
+  assert( dst != NULL );
+  size_t len;
+  const char* str = tbufReadString( buf, &len );
+  if (len >= size) {
+    len = size - 1;
+  }
+  memcpy( dst, str, len );
+  dst[len] = 0;
+  return len;
+}
+
+const char* tbufReadBinary( SBufferReader* buf, size_t *len ) {
+  size_t l = tbufReadLength( buf );
+  const char* ret = buf->data + buf->pos;
+  tbufSkip( buf, l );
+  if( len != NULL ) {
+    *len = l;
+  }
+  return ret;
+}
+
+size_t tbufReadToBinary( SBufferReader* buf, void* dst, size_t size ) {
+  assert( dst != NULL );
+  size_t len;
+  const char* data = tbufReadBinary( buf, &len );
+  if( len >= size ) {
+    len = size;
+  }
+  memcpy( dst, data, len );
+  return len;
+}
+
+bool tbufReadBool( SBufferReader* buf ) {
+  bool ret;
+  tbufReadToBuffer( buf, &ret, sizeof(ret) );
+  return ret;
+}
+
+char tbufReadChar( SBufferReader* buf ) {
+  char ret;
+  tbufReadToBuffer( buf, &ret, sizeof(ret) );
+  return ret;
+}
+
+int8_t tbufReadInt8( SBufferReader* buf ) {
+  int8_t ret;
+  tbufReadToBuffer( buf, &ret, sizeof(ret) );
+  return ret;
+}
+
+uint8_t tbufReadUint8( SBufferReader* buf ) {
+  uint8_t ret;
+  tbufReadToBuffer( buf, &ret, sizeof(ret) );
+  return ret;
+}
+
+int16_t tbufReadInt16( SBufferReader* buf ) {
+  int16_t ret;
+  tbufReadToBuffer( buf, &ret, sizeof(ret) );
+  if( buf->endian ) {
+    return (int16_t)ntohs( ret );
+  }
+  return ret;
+}
+
+uint16_t tbufReadUint16( SBufferReader* buf ) {
+  uint16_t ret;
+  tbufReadToBuffer( buf, &ret, sizeof(ret) );
+  if( buf->endian ) {
+    return ntohs( ret );
+  }
+  return ret;
+}
+
+int32_t tbufReadInt32( SBufferReader* buf ) {
+  int32_t ret;
+  tbufReadToBuffer( buf, &ret, sizeof(ret) );
+  if( buf->endian ) {
+    return (int32_t)ntohl( ret );
+  }
+  return ret;
+}
+
+uint32_t tbufReadUint32( SBufferReader* buf ) {
+  uint32_t ret;
+  tbufReadToBuffer( buf, &ret, sizeof(ret) );
+  if( buf->endian ) {
+    return ntohl( ret );
+  }
+  return ret;
+}
+
+int64_t tbufReadInt64( SBufferReader* buf ) {
+  int64_t ret;
+  tbufReadToBuffer( buf, &ret, sizeof(ret) );
+  if( buf->endian ) {
+    return (int64_t)htobe64( ret ); // TODO: ntohll
+  }
+  return ret;
+}
+
+uint64_t tbufReadUint64( SBufferReader* buf ) {
+  uint64_t ret;
+  tbufReadToBuffer( buf, &ret, sizeof(ret) );
+  if( buf->endian ) {
+    return htobe64( ret ); // TODO: ntohll
+  }
+  return ret;
+}
+
+float tbufReadFloat( SBufferReader* buf ) {
+  uint32_t ret = tbufReadUint32( buf );
+  return *(float*)( &ret );
+}
+
+double tbufReadDouble(SBufferReader* buf) {
+  uint64_t ret = tbufReadUint64( buf );
+  return *(double*)( &ret );
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// writer functions
+
+void tbufCloseWriter( SBufferWriter* buf ) {
+  (*buf->allocator)( buf->data, 0 );
+  buf->data = NULL;
+  buf->pos = 0;
+  buf->size = 0;
+}
+
+void tbufEnsureCapacity( SBufferWriter* buf, size_t size ) {
+  size += buf->pos;
+  if( size > buf->size ) {
+    size_t nsize = size + buf->size;
+    char* data = (*buf->allocator)( buf->data, nsize );
+    // TODO: the exception should be thrown by the allocator function
+    if( data == NULL ) {
+      THROW( TSDB_CODE_SERV_OUT_OF_MEMORY );
+    }
+    buf->data = data;
+    buf->size = nsize;
+  }
+}
+
+size_t tbufReserve( SBufferWriter* buf, size_t size ) {
+  tbufEnsureCapacity( buf, size );
+  size_t old = buf->pos;
+  buf->pos += size;
+  return old;
+}
+
+char* tbufGetData( SBufferWriter* buf, bool takeOver ) {
+  char* ret = buf->data;
+  if( takeOver ) {
+    buf->pos = 0;
+    buf->size = 0;
+    buf->data = NULL;
+  }
+  return ret;
+}
+
+void tbufWrite( SBufferWriter* buf, const void* data, size_t size ) {
+  assert( data != NULL );
+  tbufEnsureCapacity( buf, size );
+  memcpy( buf->data + buf->pos, data, size );
+  buf->pos += size;
+}
+
+void tbufWriteAt( SBufferWriter* buf, size_t pos, const void* data, size_t size ) {
+  assert( data != NULL );
+  // this function can only be called to fill the gap on previous writes,
+  // so 'pos + size <= buf->pos' must be true
+  assert( pos + size <= buf->pos );
+  memcpy( buf->data + pos, data, size );
+}
+
+static void tbufWriteLength( SBufferWriter* buf, size_t len ) {
+  // maximum length is 65535, if larger length is required
+  // this function and the corresponding read function need to be
+  // revised.
+  assert( len <= 0xffff );
+  tbufWriteUint16( buf, (uint16_t)len );
+}
+
+void tbufWriteStringLen( SBufferWriter* buf, const char* str, size_t len ) {
+  tbufWriteLength( buf, len );
+  tbufWrite( buf, str, len );
+  tbufWriteChar( buf, '\0' );
+}
+
+void tbufWriteString( SBufferWriter* buf, const char* str ) {
+  tbufWriteStringLen( buf, str, strlen(str) );
+}
+
+void tbufWriteBinary( SBufferWriter* buf, const void* data, size_t len ) {
+  tbufWriteLength( buf, len );
+  tbufWrite( buf, data, len );
+}
+
+void tbufWriteBool( SBufferWriter* buf, bool data ) {
+  tbufWrite( buf, &data, sizeof(data) );
+}
+
+void tbufWriteBoolAt( SBufferWriter* buf, size_t pos, bool data ) {
+  tbufWriteAt( buf, pos, &data, sizeof(data) );
+}
+
+void tbufWriteChar( SBufferWriter* buf, char data ) {
+  tbufWrite( buf, &data, sizeof(data) );
+}
+
+void tbufWriteCharAt( SBufferWriter* buf, size_t pos, char data ) {
+  tbufWriteAt( buf, pos, &data, sizeof(data) );
+}
+
+void tbufWriteInt8( SBufferWriter* buf, int8_t data ) {
+  tbufWrite( buf, &data, sizeof(data) );
+}
+
+void tbufWriteInt8At( SBufferWriter* buf, size_t pos, int8_t data ) {
+  tbufWriteAt( buf, pos, &data, sizeof(data) );
+}
+
+void tbufWriteUint8( SBufferWriter* buf, uint8_t data ) {
+  tbufWrite( buf, &data, sizeof(data) );
+}
+
+void tbufWriteUint8At( SBufferWriter* buf, size_t pos, uint8_t data ) {
+  tbufWriteAt( buf, pos, &data, sizeof(data) );
+}
+
+void tbufWriteInt16( SBufferWriter* buf, int16_t data ) {
+  if( buf->endian ) {
+    data = (int16_t)htons( data );
+  }
+  tbufWrite( buf, &data, sizeof(data) );
+}
+
+void tbufWriteInt16At( SBufferWriter* buf, size_t pos, int16_t data ) {
+  if( buf->endian ) {
+    data = (int16_t)htons( data );
+  }
+  tbufWriteAt( buf, pos, &data, sizeof(data) );
+}
+
+void tbufWriteUint16( SBufferWriter* buf, uint16_t data ) {
+  if( buf->endian ) {
+    data = htons( data );
+  }
+  tbufWrite( buf, &data, sizeof(data) );
+}
+
+void tbufWriteUint16At( SBufferWriter* buf, size_t pos, uint16_t data ) {
+  if( buf->endian ) {
+    data = htons( data );
+  }
+  tbufWriteAt( buf, pos, &data, sizeof(data) );
+}
+
+void tbufWriteInt32( SBufferWriter* buf, int32_t data ) {
+  if( buf->endian ) {
+    data = (int32_t)htonl( data );
+  }
+  tbufWrite( buf, &data, sizeof(data) );
+}
+
+void tbufWriteInt32At( SBufferWriter* buf, size_t pos, int32_t data ) {
+  if( buf->endian ) {
+    data = (int32_t)htonl( data );
+  }
+  tbufWriteAt( buf, pos, &data, sizeof(data) );
+}
+
+void tbufWriteUint32( SBufferWriter* buf, uint32_t data ) {
+  if( buf->endian ) {
+    data = htonl( data );
+  }
+  tbufWrite( buf, &data, sizeof(data) );
+}
+
+void tbufWriteUint32At( SBufferWriter* buf, size_t pos, uint32_t data ) {
+  if( buf->endian ) {
+    data = htonl( data );
+  }
+  tbufWriteAt( buf, pos, &data, sizeof(data) );
+}
+
+void tbufWriteInt64( SBufferWriter* buf, int64_t data ) {
+  if( buf->endian ) {
+    data = (int64_t)htobe64( data );
+  }
+  tbufWrite( buf, &data, sizeof(data) );
+}
+
+void tbufWriteInt64At( SBufferWriter* buf, size_t pos, int64_t data ) {
+  if( buf->endian ) {
+    data = (int64_t)htobe64( data );
+  }
+  tbufWriteAt( buf, pos, &data, sizeof(data) );
+}
+
+void tbufWriteUint64( SBufferWriter* buf, uint64_t data ) {
+  if( buf->endian ) {
+    data = htobe64( data );
+  }
+  tbufWrite( buf, &data, sizeof(data) );
+}
+
+void tbufWriteUint64At( SBufferWriter* buf, size_t pos, uint64_t data ) {
+  if( buf->endian ) {
+    data = htobe64( data );
+  }
+  tbufWriteAt( buf, pos, &data, sizeof(data) );
+}
+
+void tbufWriteFloat( SBufferWriter* buf, float data ) {
+  tbufWriteUint32( buf, *(uint32_t*)(&data) );
+}
+
+void tbufWriteFloatAt( SBufferWriter* buf, size_t pos, float data ) {
+  tbufWriteUint32At( buf, pos, *(uint32_t*)(&data) );
+}
+
+void tbufWriteDouble( SBufferWriter* buf, double data ) {
+  tbufWriteUint64( buf, *(uint64_t*)(&data) );
+}
+
+void tbufWriteDoubleAt( SBufferWriter* buf, size_t pos, double data ) {
+  tbufWriteUint64At( buf, pos, *(uint64_t*)(&data) );
+}
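
The endian flag shown above makes the integer writers apply hton* and the readers apply ntoh*, so an encoded buffer can be exchanged between hosts of different native byte order. A short sketch, not part of the commit (the function name and the test value are arbitrary):

// Sketch only (not from the commit): endian = true stores network byte order.
#include <assert.h>
#include "tbuffer.h"
#include "exception.h"

void endianRoundTripSketch() {
  SBufferWriter bw = tbufInitWriter( NULL, true );   // endian = true
  TRY( 1 ) {
    tbufWriteUint32( &bw, 0x11223344 );              // stored big-endian in the buffer
    SBufferReader br = tbufInitReader( tbufGetData(&bw, false), tbufTell(&bw), true );
    assert( tbufReadUint32(&br) == 0x11223344 );     // converted back on read
  } CATCH( code ) {
    (void)code;
  } END_TRY
  tbufCloseWriter( &bw );
}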
#!/bin/bash
python3 ./test.py $1 -f insert/basic.py
python3 ./test.py $1 -f insert/int.py
python3 ./test.py $1 -f insert/float.py
python3 ./test.py $1 -f insert/bigint.py
python3 ./test.py $1 -f insert/bool.py
python3 ./test.py $1 -f insert/double.py
python3 ./test.py $1 -f insert/smallint.py
python3 ./test.py $1 -f insert/tinyint.py
python3 ./test.py $1 -f insert/date.py
python3 ./test.py $1 -f insert/binary.py
python3 ./test.py $1 -f import_merge/importBlock1HO.py
python3 ./test.py $1 -f import_merge/importBlock1HPO.py
python3 ./test.py $1 -f import_merge/importBlock1H.py
python3 ./test.py $1 -f import_merge/importBlock1S.py
python3 ./test.py $1 -f import_merge/importBlock1Sub.py
python3 ./test.py $1 -f import_merge/importBlock1TO.py
python3 ./test.py $1 -f import_merge/importBlock1TPO.py
python3 ./test.py $1 -f import_merge/importBlock1T.py
python3 ./test.py $1 -f import_merge/importBlock2HO.py
python3 ./test.py $1 -f import_merge/importBlock2HPO.py
python3 ./test.py $1 -f import_merge/importBlock2H.py
python3 ./test.py $1 -f import_merge/importBlock2S.py
python3 ./test.py $1 -f import_merge/importBlock2Sub.py
python3 ./test.py $1 -f import_merge/importBlock2TO.py
python3 ./test.py $1 -f import_merge/importBlock2TPO.py
python3 ./test.py $1 -f import_merge/importBlock2T.py
python3 ./test.py $1 -f import_merge/importBlockbetween.py
python3 ./test.py $1 -f import_merge/importCacheFileHO.py
python3 ./test.py $1 -f import_merge/importCacheFileHPO.py
python3 ./test.py $1 -f import_merge/importCacheFileH.py
python3 ./test.py $1 -f import_merge/importCacheFileS.py
python3 ./test.py $1 -f import_merge/importCacheFileSub.py
python3 ./test.py $1 -f import_merge/importCacheFileTO.py
python3 ./test.py $1 -f import_merge/importCacheFileTPO.py
python3 ./test.py $1 -f import_merge/importCacheFileT.py
python3 ./test.py $1 -f import_merge/importDataH2.py
python3 ./test.py $1 -f import_merge/importDataHO2.py
python3 ./test.py $1 -f import_merge/importDataHO.py
python3 ./test.py $1 -f import_merge/importDataHPO.py
python3 ./test.py $1 -f import_merge/importDataLastHO.py
python3 ./test.py $1 -f import_merge/importDataLastHPO.py
python3 ./test.py $1 -f import_merge/importDataLastH.py
python3 ./test.py $1 -f import_merge/importDataLastS.py
python3 ./test.py $1 -f import_merge/importDataLastSub.py
python3 ./test.py $1 -f import_merge/importDataLastTO.py
python3 ./test.py $1 -f import_merge/importDataLastTPO.py
python3 ./test.py $1 -f import_merge/importDataLastT.py
python3 ./test.py $1 -f import_merge/importDataS.py
python3 ./test.py $1 -f import_merge/importDataSub.py
python3 ./test.py $1 -f import_merge/importDataTO.py
python3 ./test.py $1 -f import_merge/importDataTPO.py
python3 ./test.py $1 -f import_merge/importDataT.py
python3 ./test.py $1 -f import_merge/importHeadOverlap.py
python3 ./test.py $1 -f import_merge/importHeadPartOverlap.py
python3 ./test.py $1 -f import_merge/importHead.py
python3 ./test.py $1 -f import_merge/importHORestart.py
python3 ./test.py $1 -f import_merge/importHPORestart.py
python3 ./test.py $1 -f import_merge/importHRestart.py
python3 ./test.py $1 -f import_merge/importLastHO.py
python3 ./test.py $1 -f import_merge/importLastHPO.py
python3 ./test.py $1 -f import_merge/importLastH.py
python3 ./test.py $1 -f import_merge/importLastS.py
python3 ./test.py $1 -f import_merge/importLastSub.py
python3 ./test.py $1 -f import_merge/importLastTO.py
python3 ./test.py $1 -f import_merge/importLastTPO.py
python3 ./test.py $1 -f import_merge/importLastT.py
python3 ./test.py $1 -f import_merge/importSpan.py
python3 ./test.py $1 -f import_merge/importSRestart.py
python3 ./test.py $1 -f import_merge/importSubRestart.py
python3 ./test.py $1 -f import_merge/importTailOverlap.py
python3 ./test.py $1 -f import_merge/importTailPartOverlap.py
python3 ./test.py $1 -f import_merge/importTail.py
python3 ./test.py $1 -f import_merge/importToCommit.py
python3 ./test.py $1 -f import_merge/importTORestart.py
python3 ./test.py $1 -f import_merge/importTPORestart.py
python3 ./test.py $1 -f import_merge/importTRestart.py
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
self.ntables = 1
self.startTime = 1520000010000
tdDnodes.stop(1)
tdDnodes.deploy(1)
tdDnodes.start(1)
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db cache 512')
tdSql.execute('use db')
tdLog.info("================= step1")
tdLog.info("create 1 table")
tdSql.execute('create table tb1 (ts timestamp, speed int)')
tdLog.info("one block can import 38 records")
tdLog.info("================= step2")
tdLog.info("import 38 sequential data")
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(1, 39):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step3")
tdSql.query('select * from tb1')
tdSql.checkRows(38)
tdLog.info("================= step4")
tdLog.info("import 1 data before")
startTime = self.startTime - 1
tdSql.execute('import into tb1 values(%ld, %d)' % (startTime, rid))
tdLog.info("================= step5")
tdSql.query('select * from tb1')
tdSql.checkRows(39)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
self.ntables = 1
self.startTime = 1520000010000
tdDnodes.stop(1)
tdDnodes.deploy(1)
tdDnodes.start(1)
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db cache 512')
tdSql.execute('use db')
tdLog.info("================= step1")
tdLog.info("create 1 table")
tdSql.execute('create table tb1 (ts timestamp, speed int)')
tdLog.info("one block can import 38 records")
tdLog.info("================= step2")
tdLog.info("import 38 sequential data")
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(1, 39):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step3")
tdSql.query('select * from tb1')
tdSql.checkRows(38)
tdLog.info("================= step4")
tdLog.info("import 10 data before with overlap")
startTime = self.startTime - 5
sqlcmd = ['import into tb1 values']
for rid in range(1, 11):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step5")
tdSql.query('select * from tb1')
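        # 5 of the 10 imported rows overlap existing timestamps, so 38 + 5 new rows = 43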
tdSql.checkRows(43)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
self.ntables = 1
self.startTime = 1520000010000
tdDnodes.stop(1)
tdDnodes.deploy(1)
tdDnodes.start(1)
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db cache 512')
tdSql.execute('use db')
tdLog.info("================= step1")
tdLog.info("create 1 table")
tdSql.execute('create table tb1 (ts timestamp, speed int)')
tdLog.info("one block can import 38 records")
tdLog.info("================= step2")
tdLog.info("import 38 sequential data")
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(1, 11):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
for rid in range(15, 43):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step3")
tdSql.query('select * from tb1')
tdSql.checkRows(38)
tdLog.info("================= step4")
tdLog.info("import 20 data before with partly overlap")
startTime = self.startTime - 5
sqlcmd = ['import into tb1 values']
for rid in range(1, 21):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step5")
tdSql.query('select * from tb1')
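        # the import adds 5 rows before the head and 4 rows in the 11..14 gap; the rest overlap, so 38 + 9 = 47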
tdSql.checkRows(47)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
self.ntables = 1
self.startTime = 1520000010000
tdDnodes.stop(1)
tdDnodes.deploy(1)
tdDnodes.start(1)
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db cache 512')
tdSql.execute('use db')
tdLog.info("================= step1")
tdLog.info("create 1 table")
tdSql.execute('create table tb1 (ts timestamp, speed int)')
tdLog.info("one block can import 38 records")
tdLog.info("================= step2")
tdLog.info("import 38 sequential data")
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(1, 39):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step3")
tdSql.query('select * from tb1')
tdSql.checkRows(38)
tdLog.info("================= step4")
tdLog.info("import 50 data covering existing data")
startTime = self.startTime - 5
sqlcmd = ['import into tb1 values']
for rid in range(1, 51):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step5")
tdSql.query('select * from tb1')
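        # the 50 imported timestamps span the whole existing range, so only 50 distinct rows remain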
tdSql.checkRows(50)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
self.ntables = 1
self.startTime = 1520000010000
tdDnodes.stop(1)
tdDnodes.deploy(1)
tdDnodes.start(1)
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db cache 512')
tdSql.execute('use db')
tdLog.info("================= step1")
tdLog.info("create 1 table")
tdSql.execute('create table tb1 (ts timestamp, speed int)')
tdLog.info("one block can import 38 records")
tdLog.info("================= step2")
tdLog.info("import 38 sequential data")
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(1, 39):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step3")
tdSql.query('select * from tb1')
tdSql.checkRows(38)
tdLog.info("================= step4")
tdLog.info("import 10 data totally repetitive")
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(1, 11):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step5")
tdSql.query('select * from tb1')
tdSql.checkRows(38)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
self.ntables = 1
self.startTime = 1520000010000
tdDnodes.stop(1)
tdDnodes.deploy(1)
tdDnodes.start(1)
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db cache 512')
tdSql.execute('use db')
tdLog.info("================= step1")
tdLog.info("create 1 table")
tdSql.execute('create table tb1 (ts timestamp, speed int)')
tdLog.info("one block can import 38 records")
tdLog.info("================= step2")
tdLog.info("import 38 sequential data")
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(1, 39):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step3")
tdSql.query('select * from tb1')
tdSql.checkRows(38)
tdLog.info("================= step4")
tdLog.info("import 1 data after")
startTime = self.startTime + 38
tdSql.execute('import into tb1 values(%ld, %d)' % (startTime + 1, rid))
tdLog.info("================= step5")
tdSql.query('select * from tb1')
tdSql.checkRows(39)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
self.ntables = 1
self.startTime = 1520000010000
tdDnodes.stop(1)
tdDnodes.deploy(1)
tdDnodes.start(1)
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db cache 512')
tdSql.execute('use db')
tdLog.info("================= step1")
tdLog.info("create 1 table")
tdSql.execute('create table tb1 (ts timestamp, speed int)')
tdLog.info("one block can import 38 records")
tdLog.info("================= step2")
tdLog.info("import 38 sequential data")
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(1, 39):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step3")
tdSql.query('select * from tb1')
tdSql.checkRows(38)
tdLog.info("================= step4")
tdLog.info("import 10 data later with overlap")
startTime = self.startTime + 30
sqlcmd = ['import into tb1 values']
for rid in range(1, 11):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step5")
tdSql.query('select * from tb1')
tdSql.checkRows(40)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
self.ntables = 1
self.startTime = 1520000010000
tdDnodes.stop(1)
tdDnodes.deploy(1)
tdDnodes.start(1)
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db cache 512')
tdSql.execute('use db')
tdLog.info("================= step1")
tdLog.info("create 1 table")
tdSql.execute('create table tb1 (ts timestamp, speed int)')
tdLog.info("one block can import 38 records")
tdLog.info("================= step2")
tdLog.info("import 38 sequential data")
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(1, 31):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
for rid in range(35, 43):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step3")
tdSql.query('select * from tb1')
tdSql.checkRows(38)
tdLog.info("================= step4")
tdLog.info("import 30 data later with partly overlap")
startTime = self.startTime + 25
sqlcmd = ['import into tb1 values']
for rid in range(1, 31):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step5")
tdSql.query('select * from tb1')
tdSql.checkRows(55)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
self.ntables = 1
self.startTime = 1520000010000
tdDnodes.stop(1)
tdDnodes.deploy(1)
tdDnodes.start(1)
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db cache 512')
tdSql.execute('use db')
tdLog.info("================= step1")
tdLog.info("create 1 table")
tdSql.execute('create table tb1 (ts timestamp, speed int)')
tdLog.info("one block can import 38 records")
tdLog.info("================= step2")
tdLog.info("import 76 sequential data")
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(1, 77):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step3")
tdSql.query('select * from tb1')
tdSql.checkRows(76)
tdLog.info("================= step4")
tdLog.info("import 1 data before")
startTime = self.startTime - 1
tdSql.execute('import into tb1 values(%ld, %d)' % (startTime, rid))
tdLog.info("================= step5")
tdSql.query('select * from tb1')
tdSql.checkRows(77)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
self.ntables = 1
self.startTime = 1520000010000
tdDnodes.stop(1)
tdDnodes.deploy(1)
tdDnodes.start(1)
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db cache 512')
tdSql.execute('use db')
tdLog.info("================= step1")
tdLog.info("create 1 table")
tdSql.execute('create table tb1 (ts timestamp, speed int)')
tdLog.info("one block can import 38 records")
tdLog.info("================= step2")
tdLog.info("import 76 sequential data")
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(1, 77):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step3")
tdSql.query('select * from tb1')
tdSql.checkRows(76)
tdLog.info("================= step4")
tdLog.info("import 10 data before with overlap")
startTime = self.startTime - 5
sqlcmd = ['import into tb1 values']
for rid in range(1, 11):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step5")
tdSql.query('select * from tb1')
tdSql.checkRows(81)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
self.ntables = 1
self.startTime = 1520000010000
tdDnodes.stop(1)
tdDnodes.deploy(1)
tdDnodes.start(1)
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db cache 512')
tdSql.execute('use db')
tdLog.info("================= step1")
tdLog.info("create 1 table")
tdSql.execute('create table tb1 (ts timestamp, speed int)')
tdLog.info("one block can import 38 records")
tdLog.info("================= step2")
tdLog.info("import 76 sequential data")
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(1, 11):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
for rid in range(15, 81):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step3")
tdSql.query('select * from tb1')
tdSql.checkRows(76)
tdLog.info("================= step4")
tdLog.info("import 20 data before with partly overlap")
startTime = self.startTime - 5
sqlcmd = ['import into tb1 values']
for rid in range(1, 21):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step5")
tdSql.query('select * from tb1')
tdSql.checkRows(85)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
self.ntables = 1
self.startTime = 1520000010000
tdDnodes.stop(1)
tdDnodes.deploy(1)
tdDnodes.start(1)
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db cache 512')
tdSql.execute('use db')
tdLog.info("================= step1")
tdLog.info("create 1 table")
tdSql.execute('create table tb1 (ts timestamp, speed int)')
tdLog.info("one block can import 38 records")
tdLog.info("================= step2")
tdLog.info("import 76 sequential data")
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(1, 77):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step3")
tdSql.query('select * from tb1')
tdSql.checkRows(76)
tdLog.info("================= step4")
tdLog.info("import 90 data covering existing data")
startTime = self.startTime - 5
sqlcmd = ['import into tb1 values']
for rid in range(1, 91):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step5")
tdSql.query('select * from tb1')
tdSql.checkRows(90)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
self.ntables = 1
self.startTime = 1520000010000
tdDnodes.stop(1)
tdDnodes.deploy(1)
tdDnodes.start(1)
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db cache 512')
tdSql.execute('use db')
tdLog.info("================= step1")
tdLog.info("create 1 table")
tdSql.execute('create table tb1 (ts timestamp, speed int)')
tdLog.info("one block can import 38 records")
tdLog.info("================= step2")
tdLog.info("import 76 sequential data")
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(1, 77):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step3")
tdSql.query('select * from tb1')
tdSql.checkRows(76)
tdLog.info("================= step4")
tdLog.info("import 10 data totally repetitive")
startTime = self.startTime + 10
sqlcmd = ['import into tb1 values']
for rid in range(1, 11):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step5")
tdSql.query('select * from tb1')
tdSql.checkRows(76)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
self.ntables = 1
self.startTime = 1520000010000
tdDnodes.stop(1)
tdDnodes.deploy(1)
tdDnodes.start(1)
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db cache 512')
tdSql.execute('use db')
tdLog.info("================= step1")
tdLog.info("create 1 table")
tdSql.execute('create table tb1 (ts timestamp, speed int)')
tdLog.info("one block can import 38 records")
tdLog.info("================= step2")
tdLog.info("import 76 sequential data")
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(1, 77):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step3")
tdSql.query('select * from tb1')
tdSql.checkRows(76)
tdLog.info("================= step4")
tdLog.info("import 1 data after")
startTime = self.startTime + 76
tdSql.execute('import into tb1 values(%ld, %d)' % (startTime + 1, rid))
tdLog.info("================= step5")
tdSql.query('select * from tb1')
tdSql.checkRows(77)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
self.ntables = 1
self.startTime = 1520000010000
tdDnodes.stop(1)
tdDnodes.deploy(1)
tdDnodes.start(1)
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db cache 512')
tdSql.execute('use db')
tdLog.info("================= step1")
tdLog.info("create 1 table")
tdSql.execute('create table tb1 (ts timestamp, speed int)')
tdLog.info("one block can import 38 records")
tdLog.info("================= step2")
tdLog.info("import 76 sequential data")
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(1, 77):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step3")
tdSql.query('select * from tb1')
tdSql.checkRows(76)
tdLog.info("================= step4")
tdLog.info("import 10 data later with overlap")
startTime = self.startTime + 70
sqlcmd = ['import into tb1 values']
for rid in range(1, 11):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step5")
tdSql.query('select * from tb1')
tdSql.checkRows(80)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
self.ntables = 1
self.startTime = 1520000010000
tdDnodes.stop(1)
tdDnodes.deploy(1)
tdDnodes.start(1)
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db cache 512')
tdSql.execute('use db')
tdLog.info("================= step1")
tdLog.info("create 1 table")
tdSql.execute('create table tb1 (ts timestamp, speed int)')
tdLog.info("one block can import 38 records")
tdLog.info("================= step2")
tdLog.info("import 76 sequential data")
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(1, 61):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
for rid in range(65, 81):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step3")
tdSql.query('select * from tb1')
tdSql.checkRows(76)
tdLog.info("================= step4")
tdLog.info("import 30 data later with partly overlap")
startTime = self.startTime + 55
sqlcmd = ['import into tb1 values']
for rid in range(1, 31):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step5")
tdSql.query('select * from tb1')
tdSql.checkRows(85)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
self.ntables = 1
self.startTime = 1520000010000
tdDnodes.stop(1)
tdDnodes.deploy(1)
tdDnodes.start(1)
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db cache 512')
tdSql.execute('use db')
tdLog.info("================= step1")
tdLog.info("create 1 table")
tdSql.execute('create table tb1 (ts timestamp, speed int)')
tdLog.info("one block can import 38 records")
tdLog.info("================= step2")
tdLog.info("import 76 sequential data with gap between 2 blocks")
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(1, 39):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
for rid in range(40, 78):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step3")
tdSql.query('select * from tb1')
tdSql.checkRows(76)
tdLog.info("================= step4")
tdLog.info("import data into the gap between 2 blocks")
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(39, 40):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step5")
tdSql.query('select * from tb1')
tdSql.checkRows(77)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
self.ntables = 1
self.startTime = 1520000010000
tdDnodes.stop(1)
tdDnodes.deploy(1)
tdDnodes.start(1)
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db')
tdSql.execute('use db')
tdLog.info("================= step1")
tdLog.info("create 1 table")
tdSql.execute('create table tb1 (ts timestamp, speed int)')
tdLog.info("================= step2")
tdLog.info("import 10 sequential data")
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(1, 11):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step3")
tdSql.query('select * from tb1')
tdSql.checkRows(10)
tdLog.info("================= step4")
tdDnodes.stop(1)
tdLog.sleep(5)
tdDnodes.start(1)
tdLog.info("================= step5")
tdLog.info("import 10 data again")
startTime = self.startTime + 10
sqlcmd = ['import into tb1 values']
for rid in range(1, 11):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step6")
tdSql.query('select * from tb1')
tdSql.checkRows(20)
tdLog.info("================= step7")
tdLog.info("import 1 data before")
startTime = self.startTime - 1
tdSql.execute('import into tb1 values(%ld, %d)' % (startTime, rid))
tdLog.info("================= step8")
tdSql.query('select * from tb1')
tdSql.checkRows(21)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
self.ntables = 1
self.startTime = 1520000010000
tdDnodes.stop(1)
tdDnodes.deploy(1)
tdDnodes.start(1)
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db')
tdSql.execute('use db')
tdLog.info("================= step1")
tdLog.info("create 1 table")
tdSql.execute('create table tb1 (ts timestamp, speed int)')
tdLog.info("================= step2")
tdLog.info("import 10 sequential data")
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(1, 11):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step3")
tdSql.query('select * from tb1')
tdSql.checkRows(10)
tdLog.info("================= step4")
tdDnodes.stop(1)
tdLog.sleep(5)
tdDnodes.start(1)
tdLog.info("================= step5")
tdLog.info("import 10 data again")
startTime = self.startTime + 10
sqlcmd = ['import into tb1 values']
for rid in range(1, 11):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step6")
tdSql.query('select * from tb1')
tdSql.checkRows(20)
tdLog.info("================= step7")
tdLog.info("import 10 data before with overlap")
startTime = self.startTime - 5
sqlcmd = ['import into tb1 values']
for rid in range(1, 11):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step8")
tdSql.query('select * from tb1')
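        # 5 of the 10 imported rows overlap the existing head, so 20 + 5 = 25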
tdSql.checkRows(25)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
self.ntables = 1
self.startTime = 1520000010000
tdDnodes.stop(1)
tdDnodes.deploy(1)
tdDnodes.start(1)
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db')
tdSql.execute('use db')
tdLog.info("================= step1")
tdLog.info("create 1 table")
tdSql.execute('create table tb1 (ts timestamp, speed int)')
tdLog.info("================= step2")
tdLog.info("import 9 sequential data with gap")
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(1, 11):
if (rid == 5):
continue
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step3")
tdSql.query('select * from tb1')
tdSql.checkRows(9)
tdLog.info("================= step4")
tdDnodes.stop(1)
tdLog.sleep(5)
tdDnodes.start(1)
tdLog.info("================= step5")
tdLog.info("import 9 data again with gap")
startTime = self.startTime + 10
sqlcmd = ['import into tb1 values']
for rid in range(1, 11):
if (rid == 5):
continue
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step6")
tdSql.query('select * from tb1')
tdSql.checkRows(18)
tdLog.info("================= step7")
tdLog.info("import 20 data before with partly overlap")
startTime = self.startTime - 3
sqlcmd = ['import into tb1 values']
for rid in range(1, 21):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step8")
tdSql.query('select * from tb1')
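        # the import fills the two gaps (rid 5 and 15) and adds 3 rows before the head, so 18 + 5 = 23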
tdSql.checkRows(23)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
self.ntables = 1
self.startTime = 1520000010000
tdDnodes.stop(1)
tdDnodes.deploy(1)
tdDnodes.start(1)
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db')
tdSql.execute('use db')
tdLog.info("================= step1")
tdLog.info("create 1 table")
tdSql.execute('create table tb1 (ts timestamp, speed int)')
tdLog.info("================= step2")
tdLog.info("import 10 sequential data")
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(1, 11):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step3")
tdSql.query('select * from tb1')
tdSql.checkRows(10)
tdLog.info("================= step4")
tdDnodes.stop(1)
tdLog.sleep(5)
tdDnodes.start(1)
tdLog.info("================= step5")
tdLog.info("import 10 data again")
startTime = self.startTime + 10
sqlcmd = ['import into tb1 values']
for rid in range(1, 11):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step6")
tdSql.query('select * from tb1')
tdSql.checkRows(20)
tdLog.info("================= step7")
tdLog.info("import 30 data covering existing data")
startTime = self.startTime - 5
sqlcmd = ['import into tb1 values']
for rid in range(1, 31):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step8")
tdSql.query('select * from tb1')
tdSql.checkRows(30)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
self.ntables = 1
self.startTime = 1520000010000
tdDnodes.stop(1)
tdDnodes.deploy(1)
tdDnodes.start(1)
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db')
tdSql.execute('use db')
tdLog.info("================= step1")
tdLog.info("create 1 table")
tdSql.execute('create table tb1 (ts timestamp, speed int)')
tdLog.info("================= step2")
tdLog.info("import 10 sequential data")
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(1, 11):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step3")
tdSql.query('select * from tb1')
tdSql.checkRows(10)
tdLog.info("================= step4")
tdDnodes.stop(1)
tdLog.sleep(5)
tdDnodes.start(1)
tdLog.info("================= step5")
tdLog.info("import 10 data again")
startTime = self.startTime + 10
sqlcmd = ['import into tb1 values']
for rid in range(1, 11):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step6")
tdSql.query('select * from tb1')
tdSql.checkRows(20)
tdLog.info("================= step7")
tdLog.info("import 10 data totally repetitive")
startTime = self.startTime + 5
sqlcmd = ['import into tb1 values']
for rid in range(1, 11):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step8")
tdSql.query('select * from tb1')
tdSql.checkRows(20)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
self.ntables = 1
self.startTime = 1520000010000
tdDnodes.stop(1)
tdDnodes.deploy(1)
tdDnodes.start(1)
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db')
tdSql.execute('use db')
tdLog.info("================= step1")
tdLog.info("create 1 table")
tdSql.execute('create table tb1 (ts timestamp, speed int)')
tdLog.info("================= step2")
tdLog.info("import 10 sequential data")
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(1, 11):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step3")
tdSql.query('select * from tb1')
tdSql.checkRows(10)
tdLog.info("================= step4")
tdDnodes.stop(1)
tdLog.sleep(5)
tdDnodes.start(1)
tdLog.info("================= step5")
tdLog.info("import 10 data again")
startTime = self.startTime + 10
sqlcmd = ['import into tb1 values']
for rid in range(1, 11):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step6")
tdSql.query('select * from tb1')
tdSql.checkRows(20)
tdLog.info("================= step7")
tdLog.info("import 1 data later")
startTime = self.startTime + 20
tdSql.execute('import into tb1 values(%ld, %d)' % (startTime + 1, rid))
tdLog.info("================= step8")
tdSql.query('select * from tb1')
tdSql.checkRows(21)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
self.ntables = 1
self.startTime = 1520000010000
tdDnodes.stop(1)
tdDnodes.deploy(1)
tdDnodes.start(1)
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db')
tdSql.execute('use db')
tdLog.info("================= step1")
tdLog.info("create 1 table")
tdSql.execute('create table tb1 (ts timestamp, speed int)')
tdLog.info("================= step2")
tdLog.info("import 10 sequential data")
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(1, 11):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step3")
tdSql.query('select * from tb1')
tdSql.checkRows(10)
tdLog.info("================= step4")
tdDnodes.stop(1)
tdLog.sleep(5)
tdDnodes.start(1)
tdLog.info("================= step5")
tdLog.info("import 10 data again")
startTime = self.startTime + 10
sqlcmd = ['import into tb1 values']
for rid in range(1, 11):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step6")
tdSql.query('select * from tb1')
tdSql.checkRows(20)
tdLog.info("================= step7")
tdLog.info("import 10 data later with overlap")
startTime = self.startTime + 15
sqlcmd = ['import into tb1 values']
for rid in range(1, 11):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step8")
tdSql.query('select * from tb1')
tdSql.checkRows(25)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
self.ntables = 1
self.startTime = 1520000010000
tdDnodes.stop(1)
tdDnodes.deploy(1)
tdDnodes.start(1)
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db')
tdSql.execute('use db')
tdLog.info("================= step1")
tdLog.info("create 1 table")
tdSql.execute('create table tb1 (ts timestamp, speed int)')
tdLog.info("================= step2")
tdLog.info("import 9 sequential data with gap")
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(1, 11):
if (rid == 7):
continue
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step3")
tdSql.query('select * from tb1')
tdSql.checkRows(9)
tdLog.info("================= step4")
tdDnodes.stop(1)
tdLog.sleep(5)
tdDnodes.start(1)
tdLog.info("================= step5")
tdLog.info("import 9 data again with gap")
startTime = self.startTime + 10
sqlcmd = ['import into tb1 values']
for rid in range(1, 11):
if (rid == 7):
continue
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step6")
tdSql.query('select * from tb1')
tdSql.checkRows(18)
tdLog.info("================= step7")
tdLog.info("import 20 data later with partly overlap")
startTime = self.startTime + 5
sqlcmd = ['import into tb1 values']
for rid in range(1, 21):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step8")
tdSql.query('select * from tb1')
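        # the import fills the two gaps (rid 7 and 17) and appends rows 21..25, so 18 + 7 = 25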
tdSql.checkRows(25)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
self.ntables = 1
self.startTime = 1520000010000
self.rows = 200
tdDnodes.stop(1)
tdDnodes.deploy(1)
tdDnodes.start(1)
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db rows %d' % self.rows)
tdSql.execute('use db')
tdLog.info("================= step1")
tdLog.info("create 1 table")
tdSql.execute('create table tb1 (ts timestamp, speed int)')
tdLog.info(
"More than 10 rows less than %d rows will go to data file" %
self.rows)
tdLog.info("================= step2")
tdLog.info("import %d sequential data" % (self.rows / 2))
startTime = self.startTime
sqlcmd = ['import into tb1 values']
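        # use integer division (//) below so range() receives an int under Python 3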
        for rid in range(1, self.rows // 2 + 1):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step3")
tdSql.query('select * from tb1')
        tdSql.checkRows(self.rows // 2)
tdLog.info("================= step4")
tdDnodes.stop(1)
tdLog.sleep(5)
tdDnodes.start(1)
tdLog.info("================= step5")
tdLog.info("import 1 data before")
startTime = self.startTime - 1
tdSql.execute('import into tb1 values(%ld, %d)' % (startTime + 1, 1))
tdLog.info("================= step7")
tdSql.execute('reset query cache')
tdSql.query('select * from tb1 order by ts desc')
        tdSql.checkRows(self.rows // 2 + 1)
tdLog.info("================= step8")
tdLog.info("import 10 data in batch before")
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(1, 11):
sqlcmd.append('(%ld, %d)' % (startTime - rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step9")
tdSql.execute('reset query cache')
tdSql.query('select * from tb1 order by ts desc')
        tdSql.checkRows(self.rows // 2 + 11)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
self.ntables = 1
self.startTime = 1520000010000
self.rows = 200
self.rowsPerTable = 20
tdDnodes.stop(1)
tdDnodes.deploy(1)
tdDnodes.start(1)
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db rows %d' % self.rows)
tdSql.execute('use db')
tdLog.info("================= step1")
tdLog.info("create 1 table")
tdSql.execute('create table tb1 (ts timestamp, speed int)')
tdLog.info(
"More than 10 rows less than %d rows will go to data file" %
self.rows)
tdLog.info("================= step2")
tdLog.info("import %d sequential data" % self.rowsPerTable)
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(1, self.rowsPerTable + 1):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step3")
tdSql.query('select count(*) from tb1')
tdSql.checkData(0, 0, self.rowsPerTable)
tdLog.info("================= step4")
tdDnodes.stop(1)
tdLog.sleep(5)
tdDnodes.start(1)
tdLog.info("================= step5")
tdLog.info("import 6 data before with overlap")
startTime = self.startTime - 3
sqlcmd = ['import into tb1 values']
for rid in range(1, 7):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step6")
tdSql.query('select * from tb1 order by ts desc')
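        # 3 of the 6 imported rows fall before the existing head, so rowsPerTable + 3 rows are expected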
tdSql.checkRows(self.rowsPerTable + 3)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
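# Test case: import 100 sequential rows, restart the dnode, then import 6
# earlier rows in reverse timestamp order (3 overlapping); expect 103 rows.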
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
self.ntables = 1
self.startTime = 1520000010000
self.rows = 200
self.rowsPerTable = 100
tdDnodes.stop(1)
tdDnodes.deploy(1)
tdDnodes.start(1)
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db rows %d' % self.rows)
tdSql.execute('use db')
tdLog.info("================= step1")
tdLog.info("create 1 table")
tdSql.execute('create table tb1 (ts timestamp, speed int)')
tdLog.info("More than 10 but less than %d rows will go to the data file" % self.rows)
tdLog.info("================= step2")
tdLog.info("import %d sequential data" % self.rowsPerTable)
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(1, self.rowsPerTable + 1):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step3")
tdSql.query('select count(*) from tb1')
tdSql.checkData(0, 0, self.rowsPerTable)
tdLog.info("================= step4")
tdDnodes.stop(1)
tdLog.sleep(5)
tdDnodes.start(1)
tdLog.info("================= step5")
tdLog.info("import 6 data before with overlap")
startTime = self.startTime - 3
sqlcmd = ['import into tb1 values']
for rid in range(6, 0, -1):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step6")
tdSql.query('select * from tb1 order by ts desc')
tdSql.checkRows(self.rowsPerTable + 3)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
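# Test case: import 20 rows with a timestamp gap, restart the dnode, then import
# 20 rows that partly overlap and partly fill the gap; expect rowsPerTable + 8 = 28 rows.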
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
self.ntables = 1
self.startTime = 1520000010000
self.rows = 200
self.rowsPerTable = 20
tdDnodes.stop(1)
tdDnodes.deploy(1)
tdDnodes.start(1)
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db rows %d' % self.rows)
tdSql.execute('use db')
tdLog.info("================= step1")
tdLog.info("create 1 table")
tdSql.execute('create table tb1 (ts timestamp, speed int)')
tdLog.info("More than 10 but less than %d rows will go to the data file" % self.rows)
tdLog.info("================= step2")
tdLog.info("import %d sequential data" % self.rowsPerTable)
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(1, 10):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
for rid in range(14, self.rowsPerTable + 5):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step3")
tdSql.query('select count(*) from tb1')
tdSql.checkData(0, 0, self.rowsPerTable)
tdLog.info("================= step4")
tdDnodes.stop(1)
tdLog.sleep(5)
tdDnodes.start(1)
tdLog.info("================= step5")
tdLog.info("import 20 data before with partly overlap")
startTime = self.startTime - 4
sqlcmd = ['import into tb1 values']
for rid in range(1, 21):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step6")
tdSql.query('select * from tb1')
tdSql.checkRows(self.rowsPerTable + 8)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
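# Test case: import 205 sequential rows, restart the dnode, then import 1 row
# before the existing range; expect 206 rows.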
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
self.ntables = 1
self.startTime = 1520000010000
self.rows = 200
tdDnodes.stop(1)
tdDnodes.deploy(1)
tdDnodes.start(1)
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db rows %d' % self.rows)
tdSql.execute('use db')
tdLog.info("================= step1")
tdLog.info("create 1 table")
tdSql.execute('create table tb1 (ts timestamp, speed int)')
tdLog.info("More than %d but less than %d rows will go to the data and last files" % (self.rows, 10 + self.rows))
tdLog.info("================= step2")
tdLog.info("import 205 sequential data")
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(1, 206):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step3")
tdSql.query('select * from tb1')
tdSql.checkRows(205)
tdLog.info("================= step4")
tdDnodes.stop(1)
tdLog.sleep(5)
tdDnodes.start(1)
tdLog.info("================= step5")
tdLog.info("import 1 data before")
startTime = self.startTime - 1
tdSql.execute('import into tb1 values(%ld, %d)' % (startTime + 1, 1))
tdLog.info("================= step6")
tdSql.query('select * from tb1')
tdSql.checkRows(206)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
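# Test case: import 205 sequential rows, restart the dnode, then import 5
# earlier rows of which 3 overlap; expect 207 rows.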
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
self.ntables = 1
self.startTime = 1520000010000
self.rows = 200
tdDnodes.stop(1)
tdDnodes.deploy(1)
tdDnodes.start(1)
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db rows %d' % self.rows)
tdSql.execute('use db')
tdLog.info("================= step1")
tdLog.info("create 1 table")
tdSql.execute('create table tb1 (ts timestamp, speed int)')
tdLog.info("More than %d but less than %d rows will go to the data and last files" % (self.rows, 10 + self.rows))
tdLog.info("================= step2")
tdLog.info("import 205 sequential data")
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(1, 206):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step3")
tdSql.query('select * from tb1')
tdSql.checkRows(205)
tdLog.info("================= step4")
tdDnodes.stop(1)
tdLog.sleep(5)
tdDnodes.start(1)
tdLog.info("================= step5")
tdLog.info("import 5 data before with overlap")
startTime = self.startTime - 2
sqlcmd = ['import into tb1 values']
for rid in range(1, 6):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step6")
tdSql.query('select * from tb1')
tdSql.checkRows(207)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
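# Test case: import 205 rows with a timestamp gap, restart the dnode, then import
# 20 earlier rows that partly overlap and fill the gap; expect 210 rows.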
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
self.ntables = 1
self.startTime = 1520000010000
self.rows = 200
tdDnodes.stop(1)
tdDnodes.deploy(1)
tdDnodes.start(1)
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db rows %d' % self.rows)
tdSql.execute('use db')
tdLog.info("================= step1")
tdLog.info("create 1 table")
tdSql.execute('create table tb1 (ts timestamp, speed int)')
tdLog.info("More than %d but less than %d rows will go to the data and last files" % (self.rows, 10 + self.rows))
tdLog.info("================= step2")
tdLog.info("import 205 sequential data")
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(1, 11):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
for rid in range(14, 209):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step3")
tdSql.query('select * from tb1')
tdSql.checkRows(205)
tdLog.info("================= step4")
tdDnodes.stop(1)
tdLog.sleep(5)
tdDnodes.start(1)
tdLog.info("================= step5")
tdLog.info("import 20 data before with partly overlap")
startTime = self.startTime - 2
sqlcmd = ['import into tb1 values']
for rid in range(1, 21):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step6")
tdSql.query('select * from tb1')
tdSql.checkRows(210)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
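# Test case: import 205 sequential rows, restart the dnode, then import 250 rows
# that completely cover the existing range; expect 250 rows.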
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
self.ntables = 1
self.startTime = 1520000010000
self.rows = 200
tdDnodes.stop(1)
tdDnodes.deploy(1)
tdDnodes.start(1)
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db rows %d' % self.rows)
tdSql.execute('use db')
tdLog.info("================= step1")
tdLog.info("create 1 table")
tdSql.execute('create table tb1 (ts timestamp, speed int)')
tdLog.info("More than %d but less than %d rows will go to the data and last files" % (self.rows, 10 + self.rows))
tdLog.info("================= step2")
tdLog.info("import 205 sequential data")
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(1, 206):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step3")
tdSql.query('select * from tb1')
tdSql.checkRows(205)
tdLog.info("================= step4")
tdDnodes.stop(1)
tdLog.sleep(5)
tdDnodes.start(1)
tdLog.info("================= step5")
tdLog.info("import 250 data covering the existing data")
startTime = self.startTime - 15
sqlcmd = ['import into tb1 values']
for rid in range(1, 251):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step6")
tdSql.query('select * from tb1')
tdSql.checkRows(250)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
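# Test case: import 205 sequential rows, restart the dnode, then re-import 10
# rows that already exist; the row count must stay at 205.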
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
self.ntables = 1
self.startTime = 1520000010000
self.rows = 200
tdDnodes.stop(1)
tdDnodes.deploy(1)
tdDnodes.start(1)
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db rows %d' % self.rows)
tdSql.execute('use db')
tdLog.info("================= step1")
tdLog.info("create 1 table")
tdSql.execute('create table tb1 (ts timestamp, speed int)')
tdLog.info("More than %d but less than %d rows will go to the data and last files" % (self.rows, 10 + self.rows))
tdLog.info("================= step2")
tdLog.info("import 205 sequential data")
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(1, 206):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step3")
tdSql.query('select * from tb1')
tdSql.checkRows(205)
tdLog.info("================= step4")
tdDnodes.stop(1)
tdLog.sleep(5)
tdDnodes.start(1)
tdLog.info("================= step5")
tdLog.info("import 10 data totally repetitive")
startTime = self.startTime + 10
sqlcmd = ['import into tb1 values']
for rid in range(1, 11):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step6")
tdSql.query('select * from tb1')
tdSql.checkRows(205)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
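# Test case: import 205 sequential rows, restart the dnode, then import 1 row
# after the existing range; expect 206 rows.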
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
self.ntables = 1
self.startTime = 1520000010000
self.rows = 200
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db rows %d' % self.rows)
tdSql.execute('use db')
tdLog.info("================= step1")
tdLog.info("create 1 table")
tdSql.execute('create table tb1 (ts timestamp, speed int)')
tdLog.info("More than %d but less than %d rows will go to the data and last files" % (self.rows, 10 + self.rows))
tdLog.info("================= step2")
tdLog.info("import 205 sequential data")
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(1, 206):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step3")
tdSql.query('select * from tb1')
tdSql.checkRows(205)
tdLog.info("================= step4")
tdDnodes.stop(1)
tdLog.sleep(5)
tdDnodes.start(1)
tdLog.info("================= step5")
tdLog.info("import 1 data later")
startTime = self.startTime + 205
tdSql.execute('import into tb1 values(%ld, %d)' % (startTime + 1, 1))
tdLog.info("================= step6")
tdSql.query('select * from tb1')
tdSql.checkRows(206)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
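# Test case: import 205 sequential rows, restart the dnode, then import 5 later
# rows of which 2 overlap; expect 208 rows.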
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
self.ntables = 1
self.startTime = 1520000010000
self.rows = 200
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db rows %d' % self.rows)
tdSql.execute('use db')
tdLog.info("================= step1")
tdLog.info("create 1 table")
tdSql.execute('create table tb1 (ts timestamp, speed int)')
tdLog.info("More than %d but less than %d rows will go to the data and last files" % (self.rows, 10 + self.rows))
tdLog.info("================= step2")
tdLog.info("import 205 sequential data")
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(1, 206):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step3")
tdSql.query('select * from tb1')
tdSql.checkRows(205)
tdLog.info("================= step4")
tdDnodes.stop(1)
tdLog.sleep(5)
tdDnodes.start(1)
tdLog.info("================= step5")
tdLog.info("import 5 data later with overlap")
startTime = self.startTime + 203
sqlcmd = ['import into tb1 values']
for rid in range(1, 6):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step6")
tdSql.query('select * from tb1')
tdSql.checkRows(208)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
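# Test case: import 205 rows with a timestamp gap near the end, restart the
# dnode, then import 20 later rows that partly overlap; expect 212 rows.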
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
self.ntables = 1
self.startTime = 1520000010000
self.rows = 200
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db rows %d' % self.rows)
tdSql.execute('use db')
tdLog.info("================= step1")
tdLog.info("create 1 table")
tdSql.execute('create table tb1 (ts timestamp, speed int)')
tdLog.info("More than %d but less than %d rows will go to the data and last files" % (self.rows, 10 + self.rows))
tdLog.info("================= step2")
tdLog.info("import 205 sequential data")
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(1, 196):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
for rid in range(200, 210):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step3")
tdSql.query('select * from tb1')
tdSql.checkRows(205)
tdLog.info("================= step4")
tdDnodes.stop(1)
tdLog.sleep(5)
tdDnodes.start(1)
tdLog.info("================= step5")
tdLog.info("import 20 data later with partly overlap")
startTime = self.startTime + 192
sqlcmd = ['import into tb1 values']
for rid in range(1, 21):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step6")
tdSql.query('select * from tb1')
tdSql.checkRows(212)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
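# Test case: import 20 sequential rows, restart the dnode, then import 30 rows
# that completely cover the existing range; expect 30 rows.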
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
self.ntables = 1
self.startTime = 1520000010000
self.rows = 200
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db rows %d' % self.rows)
tdSql.execute('use db')
tdLog.info("================= step1")
tdLog.info("create 1 table")
tdSql.execute('create table tb1 (ts timestamp, speed int)')
tdLog.info("More than 10 but less than %d rows will go to the data file" % self.rows)
tdLog.info("================= step2")
tdLog.info("import 20 sequential data")
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(1, 21):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step3")
tdSql.query('select * from tb1')
tdSql.checkRows(20)
tdLog.info("================= step4")
tdDnodes.stop(1)
tdLog.sleep(5)
tdDnodes.start(1)
tdLog.info("================= step5")
tdLog.info("import 30 data covering the existing data")
startTime = self.startTime - 5
sqlcmd = ['import into tb1 values']
for rid in range(1, 31):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step6")
tdSql.query('select * from tb1')
tdSql.checkRows(30)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
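# Test case: import 100 sequential rows, restart the dnode, then re-import 10
# rows that already exist; the row count must stay at 100.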
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
self.ntables = 1
self.startTime = 1520000010000
self.rows = 200
tdDnodes.stop(1)
tdDnodes.deploy(1)
tdDnodes.start(1)
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db rows %d' % self.rows)
tdSql.execute('use db')
tdLog.info("================= step1")
tdLog.info("create 1 table")
tdSql.execute('create table tb1 (ts timestamp, speed int)')
tdLog.info("More than 10 but less than %d rows will go to the data file" % self.rows)
tdLog.info("================= step2")
tdLog.info("import %d sequential data" % (self.rows / 2))
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(1, self.rows // 2 + 1):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step3")
tdSql.query('select * from tb1')
tdSql.checkRows(self.rows // 2)
tdLog.info("================= step4")
tdDnodes.stop(1)
tdLog.sleep(5)
tdDnodes.start(1)
tdLog.info("================= step5")
tdLog.info("import 10 data totally repetitive")
startTime = self.startTime + 10
sqlcmd = ['import into tb1 values']
for rid in range(1, 11):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step9")
tdSql.execute('reset query cache')
tdSql.query('select * from tb1 order by ts desc')
tdSql.checkRows(self.rows // 2)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
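# Test case: import 20 sequential rows, restart the dnode, then import 1 row
# after the existing range; expect 21 rows.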
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
self.ntables = 1
self.startTime = 1520000010000
self.rows = 200
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db rows %d' % self.rows)
tdSql.execute('use db')
tdLog.info("================= step1")
tdLog.info("create 1 table")
tdSql.execute('create table tb1 (ts timestamp, speed int)')
tdLog.info("More than 10 but less than %d rows will go to the data file" % self.rows)
tdLog.info("================= step2")
tdLog.info("import 20 sequential data")
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(1, 21):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step3")
tdSql.query('select * from tb1')
tdSql.checkRows(20)
tdLog.info("================= step4")
tdDnodes.stop(1)
tdLog.sleep(5)
tdDnodes.start(1)
tdLog.info("================= step5")
tdLog.info("import 1 data later")
startTime = self.startTime + 20
tdSql.execute('import into tb1 values(%ld, %d)' % (startTime + 1, 1))
tdLog.info("================= step6")
tdSql.query('select * from tb1')
tdSql.checkRows(21)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
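# Test case: import 20 sequential rows, restart the dnode, then import 6 later
# rows of which 2 overlap; expect 24 rows.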
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
self.ntables = 1
self.startTime = 1520000010000
self.rows = 200
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db rows %d' % self.rows)
tdSql.execute('use db')
tdLog.info("================= step1")
tdLog.info("create 1 table")
tdSql.execute('create table tb1 (ts timestamp, speed int)')
tdLog.info("More than 10 but less than %d rows will go to the data file" % self.rows)
tdLog.info("================= step2")
tdLog.info("import 20 sequential data")
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(1, 21):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step3")
tdSql.query('select * from tb1')
tdSql.checkRows(20)
tdLog.info("================= step4")
tdDnodes.stop(1)
tdLog.sleep(5)
tdDnodes.start(1)
tdLog.info("================= step5")
tdLog.info("import 6 data later with overlap")
startTime = self.startTime + 18
sqlcmd = ['import into tb1 values']
for rid in range(1, 7):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step6")
tdSql.query('select * from tb1')
tdSql.checkRows(24)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
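# Test case: import 20 rows with a timestamp gap, restart the dnode, then import
# 20 later rows that partly overlap and fill the gap; expect 35 rows.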
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
self.ntables = 1
self.startTime = 1520000010000
self.rows = 200
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db rows %d' % self.rows)
tdSql.execute('use db')
tdLog.info("================= step1")
tdLog.info("create 1 table")
tdSql.execute('create table tb1 (ts timestamp, speed int)')
tdLog.info("More than 10 but less than %d rows will go to the data file" % self.rows)
tdLog.info("================= step2")
tdLog.info("import 20 sequential data")
startTime = self.startTime
sqlcmd = ['import into tb1 values']
for rid in range(1, 18):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
for rid in range(22, 25):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step3")
tdSql.query('select * from tb1')
tdSql.checkRows(20)
tdLog.info("================= step4")
tdDnodes.stop(1)
tdLog.sleep(5)
tdDnodes.start(1)
tdLog.sleep(5)
tdLog.info("================= step5")
tdLog.info("import 20 data later with partly overlap")
startTime = self.startTime + 15
sqlcmd = ['import into tb1 values']
for rid in range(1, 21):
sqlcmd.append('(%ld, %d)' % (startTime + rid, rid))
tdSql.execute(" ".join(sqlcmd))
tdLog.info("================= step6")
tdSql.query('select * from tb1')
tdSql.checkRows(35)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
...@@ -25,7 +25,12 @@ if [ "$totalFailed" -ne "0" ]; then ...@@ -25,7 +25,12 @@ if [ "$totalFailed" -ne "0" ]; then
fi fi
cd ../pytest cd ../pytest
./simpletest.sh 2>&1 | tee pytest-out.txt
if [ "$1" == "cron" ]; then
./fulltest.sh 2>&1 | tee pytest-out.txt
else
./smoketest.sh 2>&1 | tee pytest-out.txt
fi
totalPySuccess=`grep 'successfully executed' pytest-out.txt | wc -l` totalPySuccess=`grep 'successfully executed' pytest-out.txt | wc -l`
if [ "$totalPySuccess" -gt "0" ]; then if [ "$totalPySuccess" -gt "0" ]; then
......