Commit 03161e35 authored by Hongze Cheng

Merge branch 'develop' into feature/2.0tsdb

......@@ -26,6 +26,7 @@ matrix:
- python3-pip
- python3-setuptools
- valgrind
- psmisc
before_script:
- cd ${TRAVIS_BUILD_DIR}
......@@ -142,6 +143,7 @@ matrix:
- python3-pip
- python3-setuptools
- lcov
- psmisc
before_script:
- cd ${TRAVIS_BUILD_DIR}
......
from .cinterface import CTaosInterface
from .error import *
from .constants import FieldType
class TDengineCursor(object):
"""Database cursor which is used to manage the context of a fetch operation.
......@@ -19,7 +21,7 @@ class TDengineCursor(object):
if the cursor has not had an operation invoked via the .execute*() method yet.
.rowcount:This read-only attribute specifies the number of rows that the last
.execute*() produced (for DQL statements like SELECT) or affected
.execute*() produced (for DQL statements like SELECT) or affected
"""
def __init__(self, connection=None):
......@@ -44,13 +46,14 @@ class TDengineCursor(object):
raise OperationalError("Invalid use of fetch iterator")
if self._block_rows <= self._block_iter:
block, self._block_rows = CTaosInterface.fetchBlock(self._result, self._fields)
block, self._block_rows = CTaosInterface.fetchBlock(
self._result, self._fields)
if self._block_rows == 0:
raise StopIteration
self._block = list(map(tuple, zip(*block)))
self._block_iter = 0
data = self._block[self._block_iter]
data = self._block[self._block_iter]
self._block_iter += 1
return data
......@@ -85,7 +88,7 @@ class TDengineCursor(object):
"""
if self._connection is None:
return False
self._connection.clear_result_set()
self._reset_result()
self._connection = None
......@@ -101,24 +104,28 @@ class TDengineCursor(object):
if not self._connection:
# TODO : change the exception raised here
raise ProgrammingError("Cursor is not connected")
self._connection.clear_result_set()
self._reset_result()
stmt = operation
if params is not None:
pass
res = CTaosInterface.query(self._connection._conn, stmt)
if res == 0:
if CTaosInterface.fieldsCount(self._connection._conn) == 0:
self._affected_rows += CTaosInterface.affectedRows(self._connection._conn)
self._affected_rows += CTaosInterface.affectedRows(
self._connection._conn)
return CTaosInterface.affectedRows(self._connection._conn)
else:
self._result, self._fields = CTaosInterface.useResult(self._connection._conn)
self._result, self._fields = CTaosInterface.useResult(
self._connection._conn)
return self._handle_result()
else:
raise ProgrammingError(CTaosInterface.errStr(self._connection._conn))
raise ProgrammingError(
CTaosInterface.errStr(
self._connection._conn))
def executemany(self, operation, seq_of_parameters):
"""Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters.
......@@ -130,6 +137,37 @@ class TDengineCursor(object):
"""
pass
def istype(self, col, dataType):
if (dataType.upper() == "BOOL"):
if (self._description[col][1] == FieldType.C_BOOL):
return True
if (dataType.upper() == "TINYINT"):
if (self._description[col][1] == FieldType.C_TINYINT):
return True
if (dataType.upper() == "INT"):
if (self._description[col][1] == FieldType.C_INT):
return True
if (dataType.upper() == "BIGINT"):
if (self._description[col][1] == FieldType.C_BIGINT):
return True
if (dataType.upper() == "FLOAT"):
if (self._description[col][1] == FieldType.C_FLOAT):
return True
if (dataType.upper() == "DOUBLE"):
if (self._description[col][1] == FieldType.C_DOUBLE):
return True
if (dataType.upper() == "BINARY"):
if (self._description[col][1] == FieldType.C_BINARY):
return True
if (dataType.upper() == "TIMESTAMP"):
if (self._description[col][1] == FieldType.C_TIMESTAMP):
return True
if (dataType.upper() == "NCHAR"):
if (self._description[col][1] == FieldType.C_NCHAR):
return True
return False
def fetchmany(self):
pass
......@@ -138,21 +176,21 @@ class TDengineCursor(object):
"""
if self._result is None or self._fields is None:
raise OperationalError("Invalid use of fetchall")
buffer = [[] for i in range(len(self._fields))]
self._rowcount = 0
while True:
block, num_of_fields = CTaosInterface.fetchBlock(self._result, self._fields)
if num_of_fields == 0: break
block, num_of_fields = CTaosInterface.fetchBlock(
self._result, self._fields)
if num_of_fields == 0:
break
self._rowcount += num_of_fields
for i in range(len(self._fields)):
buffer[i].extend(block[i])
self._connection.clear_result_set()
return list(map(tuple, zip(*buffer)))
return list(map(tuple, zip(*buffer)))
def nextset(self):
"""
......@@ -176,12 +214,13 @@ class TDengineCursor(object):
self._block_rows = -1
self._block_iter = 0
self._affected_rows = 0
def _handle_result(self):
"""Handle the return result from query.
"""
self._description = []
for ele in self._fields:
self._description.append((ele['name'], ele['type'], None, None, None, None, False))
self._description.append(
(ele['name'], ele['type'], None, None, None, None, False))
return self._result
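For context on the `istype()` helper added above: it compares a result column's type against the `FieldType` constants cached in the cursor description after a query. A minimal usage sketch, assuming a locally reachable TDengine server and the connector installed as the `taos` package (host, database, and table names are illustrative):

```python
# Hypothetical usage of the new cursor.istype() helper; host, db and table names are illustrative.
import taos

conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", database="test")
cursor = conn.cursor()

cursor.execute("select ts, speed from t1")   # populates cursor._description via _handle_result()
rows = cursor.fetchall()

# istype(col, dataType) checks the cached column type against the matching FieldType constant.
print(cursor.istype(0, "TIMESTAMP"))   # expected True for the ts column
print(cursor.istype(1, "INT"))         # expected True for the speed column

cursor.close()
conn.close()
```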
from .cinterface import CTaosInterface
from .error import *
from .constants import FieldType
# querySeqNum = 0
class TDengineCursor(object):
"""Database cursor which is used to manage the context of a fetch operation.
......@@ -21,7 +23,7 @@ class TDengineCursor(object):
if the cursor has not had an operation invoked via the .execute*() method yet.
.rowcount:This read-only attribute specifies the number of rows that the last
.execute*() produced (for DQL statements like SELECT) or affected
.execute*() produced (for DQL statements like SELECT) or affected
"""
def __init__(self, connection=None):
......@@ -46,13 +48,14 @@ class TDengineCursor(object):
raise OperationalError("Invalid use of fetch iterator")
if self._block_rows <= self._block_iter:
block, self._block_rows = CTaosInterface.fetchBlock(self._result, self._fields)
block, self._block_rows = CTaosInterface.fetchBlock(
self._result, self._fields)
if self._block_rows == 0:
raise StopIteration
self._block = list(map(tuple, zip(*block)))
self._block_iter = 0
data = self._block[self._block_iter]
data = self._block[self._block_iter]
self._block_iter += 1
return data
......@@ -87,7 +90,7 @@ class TDengineCursor(object):
"""
if self._connection is None:
return False
self._connection.clear_result_set()
self._reset_result()
self._connection = None
......@@ -103,14 +106,13 @@ class TDengineCursor(object):
if not self._connection:
# TODO : change the exception raised here
raise ProgrammingError("Cursor is not connected")
self._connection.clear_result_set()
self._reset_result()
stmt = operation
if params is not None:
pass
# global querySeqNum
# querySeqNum += 1
......@@ -121,13 +123,17 @@ class TDengineCursor(object):
if res == 0:
if CTaosInterface.fieldsCount(self._connection._conn) == 0:
self._affected_rows += CTaosInterface.affectedRows(self._connection._conn)
self._affected_rows += CTaosInterface.affectedRows(
self._connection._conn)
return CTaosInterface.affectedRows(self._connection._conn)
else:
self._result, self._fields = CTaosInterface.useResult(self._connection._conn)
self._result, self._fields = CTaosInterface.useResult(
self._connection._conn)
return self._handle_result()
else:
raise ProgrammingError(CTaosInterface.errStr(self._connection._conn))
raise ProgrammingError(
CTaosInterface.errStr(
self._connection._conn))
def executemany(self, operation, seq_of_parameters):
"""Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters.
......@@ -142,26 +148,57 @@ class TDengineCursor(object):
def fetchmany(self):
pass
def istype(self, col, dataType):
if (dataType.upper() == "BOOL"):
if (self._description[col][1] == FieldType.C_BOOL):
return True
if (dataType.upper() == "TINYINT"):
if (self._description[col][1] == FieldType.C_TINYINT):
return True
if (dataType.upper() == "INT"):
if (self._description[col][1] == FieldType.C_INT):
return True
if (dataType.upper() == "BIGINT"):
if (self._description[col][1] == FieldType.C_BIGINT):
return True
if (dataType.upper() == "FLOAT"):
if (self._description[col][1] == FieldType.C_FLOAT):
return True
if (dataType.upper() == "DOUBLE"):
if (self._description[col][1] == FieldType.C_DOUBLE):
return True
if (dataType.upper() == "BINARY"):
if (self._description[col][1] == FieldType.C_BINARY):
return True
if (dataType.upper() == "TIMESTAMP"):
if (self._description[col][1] == FieldType.C_TIMESTAMP):
return True
if (dataType.upper() == "NCHAR"):
if (self._description[col][1] == FieldType.C_NCHAR):
return True
return False
def fetchall(self):
"""Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation.
"""
if self._result is None or self._fields is None:
raise OperationalError("Invalid use of fetchall")
buffer = [[] for i in range(len(self._fields))]
self._rowcount = 0
while True:
block, num_of_fields = CTaosInterface.fetchBlock(self._result, self._fields)
if num_of_fields == 0: break
block, num_of_fields = CTaosInterface.fetchBlock(
self._result, self._fields)
if num_of_fields == 0:
break
self._rowcount += num_of_fields
for i in range(len(self._fields)):
buffer[i].extend(block[i])
self._connection.clear_result_set()
return list(map(tuple, zip(*buffer)))
return list(map(tuple, zip(*buffer)))
def nextset(self):
"""
......@@ -185,12 +222,13 @@ class TDengineCursor(object):
self._block_rows = -1
self._block_iter = 0
self._affected_rows = 0
def _handle_result(self):
"""Handle the return result from query.
"""
self._description = []
for ele in self._fields:
self._description.append((ele['name'], ele['type'], None, None, None, None, False))
self._description.append(
(ele['name'], ele['type'], None, None, None, None, False))
return self._result
......@@ -95,7 +95,7 @@ typedef void* tsync_h;
tsync_h syncStart(const SSyncInfo *);
void syncStop(tsync_h shandle);
int syncReconfig(tsync_h shandle, const SSyncCfg *);
int syncForwardToPeer(tsync_h shandle, void *pHead, void *mhandle);
int syncForwardToPeer(tsync_h shandle, void *pHead, void *mhandle, int qtype);
void syncConfirmForward(tsync_h shandle, uint64_t version, int32_t code);
void syncRecover(tsync_h shandle); // recover from other nodes:
int syncGetNodesRole(tsync_h shandle, SNodesRole *);
......
......@@ -227,7 +227,7 @@ static void sdbConfirmForward(void *ahandle, void *param, int32_t code) {
static int32_t sdbForwardToPeer(SWalHead *pHead) {
if (tsSdbObj.sync == NULL) return TSDB_CODE_SUCCESS;
int32_t code = syncForwardToPeer(tsSdbObj.sync, pHead, (void*)pHead->version);
int32_t code = syncForwardToPeer(tsSdbObj.sync, pHead, (void*)pHead->version, TAOS_QTYPE_RPC);
if (code > 0) {
sdbTrace("forward request is sent, version:%" PRIu64 ", code:%d", pHead->version, code);
sem_wait(&tsSdbObj.sem);
......
......@@ -269,7 +269,6 @@ static int32_t mgmtChildTableActionRestored() {
SChildTableObj *pTable = NULL;
while (1) {
mgmtDecTableRef(pTable);
pIter = mgmtGetNextChildTable(pIter, &pTable);
if (pTable == NULL) break;
......@@ -278,6 +277,7 @@ static int32_t mgmtChildTableActionRestored() {
mError("ctable:%s, failed to get db, discard it", pTable->info.tableId);
SSdbOper desc = {.type = SDB_OPER_LOCAL, .pObj = pTable, .table = tsChildTableSdb};
sdbDeleteRow(&desc);
mgmtDecTableRef(pTable);
continue;
}
mgmtDecDbRef(pDb);
......@@ -288,6 +288,7 @@ static int32_t mgmtChildTableActionRestored() {
pTable->vgId = 0;
SSdbOper desc = {.type = SDB_OPER_LOCAL, .pObj = pTable, .table = tsChildTableSdb};
sdbDeleteRow(&desc);
mgmtDecTableRef(pTable);
continue;
}
mgmtDecVgroupRef(pVgroup);
......@@ -298,6 +299,7 @@ static int32_t mgmtChildTableActionRestored() {
pTable->vgId = 0;
SSdbOper desc = {.type = SDB_OPER_LOCAL, .pObj = pTable, .table = tsChildTableSdb};
sdbDeleteRow(&desc);
mgmtDecTableRef(pTable);
continue;
}
......@@ -306,6 +308,7 @@ static int32_t mgmtChildTableActionRestored() {
pTable->vgId = 0;
SSdbOper desc = {.type = SDB_OPER_LOCAL, .pObj = pTable, .table = tsChildTableSdb};
sdbDeleteRow(&desc);
mgmtDecTableRef(pTable);
continue;
}
......@@ -316,10 +319,13 @@ static int32_t mgmtChildTableActionRestored() {
pTable->vgId = 0;
SSdbOper desc = {.type = SDB_OPER_LOCAL, .pObj = pTable, .table = tsChildTableSdb};
sdbDeleteRow(&desc);
mgmtDecTableRef(pTable);
continue;
}
mgmtDecTableRef(pSuperTable);
}
mgmtDecTableRef(pTable);
}
sdbFreeIter(pIter);
......@@ -1136,19 +1142,20 @@ int32_t mgmtRetrieveShowSuperTables(SShowObj *pShow, char *data, int32_t rows, v
char stableName[TSDB_TABLE_NAME_LEN] = {0};
while (numOfRows < rows) {
mgmtDecTableRef(pTable);
pShow->pIter = mgmtGetNextSuperTable(pShow->pIter, &pTable);
if (pTable == NULL) break;
if (strncmp(pTable->info.tableId, prefix, prefixLen)) {
mgmtDecTableRef(pTable);
continue;
}
memset(stableName, 0, tListLen(stableName));
mgmtExtractTableName(pTable->info.tableId, stableName);
if (pShow->payloadLen > 0 &&
patternMatch(pShow->payload, stableName, TSDB_TABLE_NAME_LEN, &info) != TSDB_PATTERN_MATCH)
if (pShow->payloadLen > 0 && patternMatch(pShow->payload, stableName, TSDB_TABLE_NAME_LEN, &info) != TSDB_PATTERN_MATCH) {
mgmtDecTableRef(pTable);
continue;
}
cols = 0;
......@@ -1178,6 +1185,7 @@ int32_t mgmtRetrieveShowSuperTables(SShowObj *pShow, char *data, int32_t rows, v
cols++;
numOfRows++;
mgmtDecTableRef(pTable);
}
pShow->numOfReads += numOfRows;
......@@ -1475,7 +1483,7 @@ static SChildTableObj* mgmtDoCreateChildTable(SCMCreateTableMsg *pCreate, SVgObj
return NULL;
}
mTrace("table:%s, create table in vgroup, id:%d, uid:%" PRIu64 , pTable->info.tableId, pTable->sid, pTable->uid);
mTrace("table:%s, create table in vgroup:%d, id:%d, uid:%" PRIu64 , pTable->info.tableId, pVgroup->vgId, pTable->sid, pTable->uid);
return pTable;
}
......@@ -2106,12 +2114,12 @@ static int32_t mgmtRetrieveShowTables(SShowObj *pShow, char *data, int32_t rows,
int32_t prefixLen = strlen(prefix);
while (numOfRows < rows) {
mgmtDecTableRef(pTable);
pShow->pIter = mgmtGetNextChildTable(pShow->pIter, &pTable);
if (pTable == NULL) break;
// not belong to current db
if (strncmp(pTable->info.tableId, prefix, prefixLen)) {
mgmtDecTableRef(pTable);
continue;
}
......@@ -2120,8 +2128,8 @@ static int32_t mgmtRetrieveShowTables(SShowObj *pShow, char *data, int32_t rows,
// pattern compare for table name
mgmtExtractTableName(pTable->info.tableId, tableName);
if (pShow->payloadLen > 0 &&
patternMatch(pShow->payload, tableName, TSDB_TABLE_NAME_LEN, &info) != TSDB_PATTERN_MATCH) {
if (pShow->payloadLen > 0 && patternMatch(pShow->payload, tableName, TSDB_TABLE_NAME_LEN, &info) != TSDB_PATTERN_MATCH) {
mgmtDecTableRef(pTable);
continue;
}
......@@ -2156,6 +2164,7 @@ static int32_t mgmtRetrieveShowTables(SShowObj *pShow, char *data, int32_t rows,
cols++;
numOfRows++;
mgmtDecTableRef(pTable);
}
pShow->numOfReads += numOfRows;
......
......@@ -586,6 +586,7 @@ static int tsdbRemoveTableFromIndex(STsdbMeta *pMeta, STable *pTable) {
}
}
taosArrayDestroy(res);
return 0;
}
......
......@@ -1466,8 +1466,8 @@ static int tsdbReadRowsFromCache(SSkipListIterator* pIter, STable* pTable, TSKEY
}
}
numOfRows++;
if (numOfRows >= maxRowsToRead) {
if (++numOfRows >= maxRowsToRead) {
tSkipListIterNext(pIter);
break;
}
......
......@@ -416,12 +416,12 @@ void getTmpfilePath(const char *fileNamePrefix, char *dstPath) {
#else
char *tmpDir = "/tmp/";
#endif
int64_t ts = taosGetTimestampUs();
strcpy(tmpPath, tmpDir);
strcat(tmpPath, tdengineTmpFileNamePrefix);
strcat(tmpPath, fileNamePrefix);
strcat(tmpPath, "-%llu-%u");
snprintf(dstPath, PATH_MAX, tmpPath, taosGetPthreadId(), atomic_add_fetch_32(&tmpFileSerialNum, 1));
strcat(tmpPath, "-%d-%"PRIu64"-%u-%"PRIu64);
snprintf(dstPath, PATH_MAX, tmpPath, getpid(), taosGetPthreadId(), atomic_add_fetch_32(&tmpFileSerialNum, 1), ts);
}
int tasoUcs4Compare(void* f1_ucs4, void *f2_ucs4, int bytes) {
......
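The `getTmpfilePath()` change above extends the temporary-file name from `<prefix>-<tid>-<serial>` to `<prefix>-<pid>-<tid>-<serial>-<timestamp>`, so files created by different processes, or by a restarted process that happens to reuse a thread id, no longer collide. A rough Python sketch of the new naming scheme (directory and prefix are illustrative; the real implementation is the C code above):

```python
# Illustrative Python model of the new temp-file naming scheme; not the actual C implementation.
import itertools
import os
import threading
import time

_serial = itertools.count(1)   # stands in for the atomic tmpFileSerialNum counter

def tmpfile_path(prefix, tmp_dir="/tmp/"):
    ts_us = int(time.time() * 1_000_000)   # microseconds, like taosGetTimestampUs()
    tid = threading.get_ident()            # stands in for taosGetPthreadId()
    return "%stdengine-%s-%d-%d-%d-%d" % (
        tmp_dir, prefix, os.getpid(), tid, next(_serial), ts_us)

print(tmpfile_path("sort"))
# e.g. /tmp/tdengine-sort-12345-139872316512000-1-1588262400000001
```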
......@@ -46,7 +46,7 @@ static pthread_once_t vnodeModuleInit = PTHREAD_ONCE_INIT;
#ifndef _SYNC
tsync_h syncStart(const SSyncInfo *info) { return NULL; }
int syncForwardToPeer(tsync_h shandle, void *pHead, void *mhandle) { return 0; }
int syncForwardToPeer(tsync_h shandle, void *pHead, void *mhandle, int qtype) { return 0; }
void syncStop(tsync_h shandle) {}
int syncReconfig(tsync_h shandle, const SSyncCfg * cfg) { return 0; }
int syncGetNodesRole(tsync_h shandle, SNodesRole * cfg) { return 0; }
......
......@@ -72,10 +72,9 @@ int32_t vnodeProcessWrite(void *param1, int qtype, void *param2, void *item) {
code = walWrite(pVnode->wal, pHead);
if (code < 0) return code;
// forward to peers if data is from RPC or CQ
// forward to peers; even for WAL/FWD entries this must be called so that sync can update its version
int32_t syncCode = 0;
if (qtype == TAOS_QTYPE_RPC || qtype == TAOS_QTYPE_CQ)
syncCode = syncForwardToPeer(pVnode->sync, pHead, item);
syncCode = syncForwardToPeer(pVnode->sync, pHead, item, qtype);
if (syncCode < 0) return syncCode;
// write data locally
......
......@@ -22,6 +22,32 @@ python3 ./test.py $1 -f table/tablename-boundary.py
# tag
python3 ./test.py $1 -f tag_lite/filter.py
python3 ./test.py $1 -f tag_lite/create-tags-boundary.py
python3 ./test.py $1 -f tag_lite/3.py
python3 ./test.py $1 -f tag_lite/4.py
python3 ./test.py $1 -f tag_lite/5.py
python3 ./test.py $1 -f tag_lite/6.py
python3 ./test.py $1 -f tag_lite/add.py
python3 ./test.py $1 -f tag_lite/bigint.py
python3 ./test.py $1 -f tag_lite/binary_binary.py
python3 ./test.py $1 -f tag_lite/binary.py
python3 ./test.py $1 -f tag_lite/bool_binary.py
python3 ./test.py $1 -f tag_lite/bool_int.py
python3 ./test.py $1 -f tag_lite/bool.py
python3 ./test.py $1 -f tag_lite/change.py
python3 ./test.py $1 -f tag_lite/column.py
python3 ./test.py $1 -f tag_lite/commit.py
python3 ./test.py $1 -f tag_lite/create.py
python3 ./test.py $1 -f tag_lite/datatype.py
python3 ./test.py $1 -f tag_lite/datatype-without-alter.py
python3 ./test.py $1 -f tag_lite/delete.py
python3 ./test.py $1 -f tag_lite/double.py
python3 ./test.py $1 -f tag_lite/float.py
python3 ./test.py $1 -f tag_lite/int_binary.py
python3 ./test.py $1 -f tag_lite/int_float.py
python3 ./test.py $1 -f tag_lite/int.py
python3 ./test.py $1 -f tag_lite/set.py
python3 ./test.py $1 -f tag_lite/smallint.py
python3 ./test.py $1 -f tag_lite/tinyint.py
python3 ./test.py $1 -f dbmgmt/database-name-boundary.py
......
......@@ -34,7 +34,7 @@ class TDTestCase:
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db cache 512')
tdSql.execute('create database db cache 128')
tdSql.execute('use db')
tdLog.info("================= step1")
......
......@@ -34,7 +34,7 @@ class TDTestCase:
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db cache 512')
tdSql.execute('create database db cache 128')
tdSql.execute('use db')
tdLog.info("================= step1")
......
......@@ -34,7 +34,7 @@ class TDTestCase:
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db cache 512')
tdSql.execute('create database db cache 128')
tdSql.execute('use db')
tdLog.info("================= step1")
......
......@@ -34,7 +34,7 @@ class TDTestCase:
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db cache 512')
tdSql.execute('create database db cache 128')
tdSql.execute('use db')
tdLog.info("================= step1")
......
......@@ -34,7 +34,7 @@ class TDTestCase:
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db cache 512')
tdSql.execute('create database db cache 128')
tdSql.execute('use db')
tdLog.info("================= step1")
......
......@@ -34,7 +34,7 @@ class TDTestCase:
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db cache 512')
tdSql.execute('create database db cache 128')
tdSql.execute('use db')
tdLog.info("================= step1")
......
......@@ -34,7 +34,7 @@ class TDTestCase:
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db cache 512')
tdSql.execute('create database db cache 128')
tdSql.execute('use db')
tdLog.info("================= step1")
......
......@@ -34,7 +34,7 @@ class TDTestCase:
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db cache 512')
tdSql.execute('create database db cache 128')
tdSql.execute('use db')
tdLog.info("================= step1")
......
......@@ -34,7 +34,7 @@ class TDTestCase:
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db cache 512')
tdSql.execute('create database db cache 128')
tdSql.execute('use db')
tdLog.info("================= step1")
......
......@@ -34,7 +34,7 @@ class TDTestCase:
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db cache 512')
tdSql.execute('create database db cache 128')
tdSql.execute('use db')
tdLog.info("================= step1")
......
......@@ -34,7 +34,7 @@ class TDTestCase:
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db cache 512')
tdSql.execute('create database db cache 128')
tdSql.execute('use db')
tdLog.info("================= step1")
......
......@@ -34,7 +34,7 @@ class TDTestCase:
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db cache 512')
tdSql.execute('create database db cache 128')
tdSql.execute('use db')
tdLog.info("================= step1")
......
......@@ -34,7 +34,7 @@ class TDTestCase:
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db cache 512')
tdSql.execute('create database db cache 128')
tdSql.execute('use db')
tdLog.info("================= step1")
......
......@@ -34,7 +34,7 @@ class TDTestCase:
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db cache 512')
tdSql.execute('create database db cache 128')
tdSql.execute('use db')
tdLog.info("================= step1")
......
......@@ -34,7 +34,7 @@ class TDTestCase:
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db cache 512')
tdSql.execute('create database db cache 128')
tdSql.execute('use db')
tdLog.info("================= step1")
......
......@@ -34,7 +34,7 @@ class TDTestCase:
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db cache 512')
tdSql.execute('create database db cache 128')
tdSql.execute('use db')
tdLog.info("================= step1")
......
......@@ -34,7 +34,7 @@ class TDTestCase:
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db cache 512')
tdSql.execute('create database db cache 128')
tdSql.execute('use db')
tdLog.info("================= step1")
......
......@@ -33,7 +33,7 @@ class TDTestCase:
tdDnodes.start(1)
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
tdSql.execute('create database db cache 512 maxtables 10')
tdSql.execute('create database db cache 128 maxtables 10')
tdSql.execute('use db')
tdLog.info("================= step1")
......
...... (4 file diffs collapsed and not shown)
......@@ -79,6 +79,9 @@ if __name__ == "__main__":
time.sleep(1)
processID = subprocess.check_output(psCmd, shell=True)
fuserCmd = "fuser -k -n tcp 6030"
os.system(fuserCmd)
tdLog.info('stop All dnodes')
sys.exit(0)
......
......@@ -23,7 +23,7 @@ class TDLog:
self.path = ""
def info(self, info):
print("%s %s" % (datetime.datetime.now(), info))
print("%s %s\n" % (datetime.datetime.now(), info))
def sleep(self, sec):
print("%s sleep %d seconds" % (datetime.datetime.now(), sec))
......
......@@ -77,6 +77,31 @@ class TDSql:
tdLog.info("sql:%s, queryRows:%d == expect:%d" %
(self.sql, self.queryRows, expectRows))
def checkDataType(self, row, col, dataType):
frame = inspect.stack()[1]
callerModule = inspect.getmodule(frame[0])
callerFilename = callerModule.__file__
if row < 0:
tdLog.exit(
"%s failed: sql:%s, row:%d is smaller than zero" %
(callerFilename, self.sql, row))
if col < 0:
tdLog.exit(
"%s failed: sql:%s, col:%d is smaller than zero" %
(callerFilename, self.sql, col))
if row > self.queryRows:
tdLog.exit(
"%s failed: sql:%s, row:%d is larger than queryRows:%d" %
(callerFilename, self.sql, row, self.queryRows))
if col > self.queryCols:
tdLog.exit(
"%s failed: sql:%s, col:%d is larger than queryCols:%d" %
(callerFilename, self.sql, col, self.queryCols))
return self.cursor.istype(col, dataType)
def checkData(self, row, col, data):
frame = inspect.stack()[1]
callerModule = inspect.getmodule(frame[0])
......
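`checkDataType()` above delegates to the new `cursor.istype()` helper, so a test case can assert a result column's type directly. A hedged sketch of how a test might use it, assuming the usual fixtures where `tdSql` has already been initialised with a live cursor (table and column names are made up):

```python
# Hypothetical test fragment; assumes tdSql.init(conn.cursor()) has already run in the harness.
from util.sql import tdSql

tdSql.execute("create table if not exists tb (ts timestamp, speed int, note binary(20))")
tdSql.execute("insert into tb values(now, 1, 'a')")
tdSql.query("select * from tb")

# checkDataType(row, col, dataType) returns cursor.istype(col, dataType)
assert tdSql.checkDataType(0, 0, "TIMESTAMP")
assert tdSql.checkDataType(0, 1, "INT")
assert tdSql.checkDataType(0, 2, "BINARY")
```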
......@@ -123,7 +123,7 @@ sql insert into tb values(1520000025002, 25002)
sql insert into tb values(1520000060000, 60000)
sql select * from tb;
print $rows
if $rows != 24 then
if $rows != 23 then
return -1
endi
......@@ -156,7 +156,7 @@ sql import into tb values(1523110400000, 50001)
sql import into tb values(1521382400000, 500051)
sql select * from tb;
print $rows
if $rows != 36 then
if $rows != 35 then
return -1
endi
......@@ -169,7 +169,7 @@ sleep 5000
sql use ir1db
sql select * from tb;
print $rows
if $rows != 36 then
if $rows != 35 then
return -1
endi
......@@ -178,7 +178,7 @@ print ================= step11
#sql import into tb values(now-50d, 7003) (now-48d, 7003) (now-46d, 7003) (now-44d, 7003) (now-42d, 7003)
sql import into tb values(1515680000000, 7003) (1515852800000, 7003) (1516025600000, 7003) (1516198400000, 7003) (1516371200000, 7003)
sql select * from tb;
if $rows != 41 then
if $rows != 40 then
return -1
endi
......@@ -188,7 +188,7 @@ print ================= step12
sql import into tb values(1518358400000, 7003) (1518444800000, 7003) (1518531200000, 7003) (1518617600000, 7003) (1518704000000, 7003) (1518790400000, 7003) (1518876800000, 7003) (1518963200000, 7003) (1519049600000, 7003)
sql select * from tb;
print $rows
if $rows != 50 then
if $rows != 49 then
return -1
endi
......@@ -204,7 +204,7 @@ sql import into tb values(1516716800000, 50001)
sql import into tb values(1517580800000, 50001)
sql select * from tb;
if $rows != 50 then
if $rows != 52 then
return -1
endi
......
......@@ -32,7 +32,7 @@ cd ../../../debug; make
./test.sh -f general/compute/diff.sim
./test.sh -f general/compute/diff2.sim
./test.sh -f general/compute/first.sim
# liao ./test.sh -f general/compute/interval.sim
./test.sh -f general/compute/interval.sim
./test.sh -f general/compute/last.sim
./test.sh -f general/compute/leastsquare.sim
./test.sh -f general/compute/max.sim
......@@ -53,7 +53,7 @@ cd ../../../debug; make
./test.sh -f general/db/delete_reuse1.sim
./test.sh -f general/db/delete_reuse2.sim
./test.sh -f general/db/delete_reusevnode.sim
#hongze ./test.sh -f general/db/delete_reusevnode2.sim
./test.sh -f general/db/delete_reusevnode2.sim
./test.sh -f general/db/delete_writing1.sim
./test.sh -f general/db/delete_writing2.sim
./test.sh -f general/db/delete.sim
......@@ -103,9 +103,9 @@ cd ../../../debug; make
#unsupport ./test.sh -f general/parser/alter_stable.sim
./test.sh -f general/parser/auto_create_tb.sim
./test.sh -f general/parser/auto_create_tb_drop_tb.sim
./test.sh -f general/parser/col_arithmetic_operation.sim
#liao ./test.sh -f general/parser/col_arithmetic_operation.sim
./test.sh -f general/parser/columnValue.sim
./test.sh -f general/parser/commit.sim
#liao ./test.sh -f general/parser/commit.sim
# ./test.sh -f general/parser/create_db.sim
# ./test.sh -f general/parser/create_mt.sim
# ./test.sh -f general/parser/create_tb.sim
......@@ -130,7 +130,7 @@ cd ../../../debug; make
# ./test.sh -f general/parser/limit1_tblocks100.sim
# ./test.sh -f general/parser/limit2.sim
# ./test.sh -f general/parser/mixed_blocks.sim
# ./test.sh -f general/parser/selectResNum.sim
./test.sh -f general/parser/selectResNum.sim
# ./test.sh -f general/parser/select_across_vnodes.sim
# ./test.sh -f general/parser/set_tag_vals.sim
# ./test.sh -f general/parser/slimit.sim
......@@ -171,7 +171,7 @@ cd ../../../debug; make
./test.sh -f general/table/db.table.sim
./test.sh -f general/table/delete_reuse1.sim
./test.sh -f general/table/delete_reuse2.sim
#hongze ./test.sh -f general/table/delete_writing.sim
#liao ./test.sh -f general/table/delete_writing.sim
./test.sh -f general/table/describe.sim
./test.sh -f general/table/double.sim
./test.sh -f general/table/fill.sim
......@@ -253,12 +253,12 @@ cd ../../../debug; make
./test.sh -u -f unique/column/replica3.sim
#liao wait ./test.sh -u -f unique/db/commit.sim
./test.sh -u -f unique/db/commit.sim
./test.sh -u -f unique/db/delete.sim
./test.sh -u -f unique/db/delete_part.sim
./test.sh -u -f unique/db/replica_add12.sim
./test.sh -u -f unique/db/replica_add13.sim
#hongze wait ./test.sh -u -f unique/db/replica_add23.sim
./test.sh -u -f unique/db/replica_add23.sim
./test.sh -u -f unique/db/replica_reduce21.sim
./test.sh -u -f unique/db/replica_reduce32.sim
./test.sh -u -f unique/db/replica_reduce31.sim
......@@ -269,7 +269,7 @@ cd ../../../debug; make
./test.sh -u -f unique/dnode/balance3.sim
./test.sh -u -f unique/dnode/balancex.sim
./test.sh -u -f unique/dnode/offline1.sim
#hongze wait ./test.sh -u -f unique/dnode/offline2.sim
#jeff ./test.sh -u -f unique/dnode/offline2.sim
./test.sh -u -f unique/dnode/remove1.sim
#hongze ./test.sh -u -f unique/dnode/remove2.sim
./test.sh -u -f unique/dnode/vnode_clean.sim
......
......@@ -29,7 +29,7 @@ sleep 3000
sql create database d1 replica 2 maxTables 4
sql create table d1.t1(ts timestamp, i int)
sql insert into d1.t1 values(now, 1)
sql insert into d1.t1 values(1588262400001, 1)
sql show dnodes
print dnode1 $data4_1
......
......@@ -22,19 +22,19 @@ sql connect
sql create database d1 maxTables 4
sql create table d1.t1 (t timestamp, i int)
sql insert into d1.t1 values(now+1s, 15)
sql insert into d1.t1 values(now+2s, 14)
sql insert into d1.t1 values(now+3s, 13)
sql insert into d1.t1 values(now+4s, 12)
sql insert into d1.t1 values(now+5s, 11)
sql insert into d1.t1 values(1588262400001, 15)
sql insert into d1.t1 values(1588262400002, 14)
sql insert into d1.t1 values(1588262400003, 13)
sql insert into d1.t1 values(1588262400004, 12)
sql insert into d1.t1 values(1588262400005, 11)
sql create database d2 maxTables 4
sql create table d2.t2 (t timestamp, i int)
sql insert into d2.t2 values(now+1s, 25)
sql insert into d2.t2 values(now+2s, 24)
sql insert into d2.t2 values(now+3s, 23)
sql insert into d2.t2 values(now+4s, 22)
sql insert into d2.t2 values(now+5s, 21)
sql insert into d2.t2 values(1588262400001, 25)
sql insert into d2.t2 values(1588262400002, 24)
sql insert into d2.t2 values(1588262400003, 23)
sql insert into d2.t2 values(1588262400004, 22)
sql insert into d2.t2 values(1588262400005, 21)
sql show dnodes
print dnode1 openVnodes $data2_1
......@@ -49,11 +49,11 @@ sleep 9000
sql create database d3 replica 2 maxTables 4
sql create table d3.t3 (t timestamp, i int)
sql insert into d3.t3 values(now+1s, 35)
sql insert into d3.t3 values(now+2s, 34)
sql insert into d3.t3 values(now+3s, 33)
sql insert into d3.t3 values(now+4s, 32)
sql insert into d3.t3 values(now+5s, 31)
sql insert into d3.t3 values(1588262400001, 35)
sql insert into d3.t3 values(1588262400002, 34)
sql insert into d3.t3 values(1588262400003, 33)
sql insert into d3.t3 values(1588262400004, 32)
sql insert into d3.t3 values(1588262400005, 31)
$x = 0
show2:
......