提交 3539dd2b 编写于 作者: H Hongze Cheng

Merge branch 'develop' into hotfix/sync_loss_data

...@@ -281,6 +281,30 @@ SELECT select_expr [, select_expr ...] ...@@ -281,6 +281,30 @@ SELECT select_expr [, select_expr ...]
[LIMIT limit_val [, OFFSET offset_val]] [LIMIT limit_val [, OFFSET offset_val]]
[>> export_file] [>> export_file]
``` ```
说明:针对 insert 类型的 SQL 语句,我们采用的是流式解析策略,在发现后面的错误之前,前面正确的部分 SQL 仍会执行。下面的 SQL 中,insert 语句是无效的,但是 d1001 仍会被创建。
```mysql
taos> create table meters(ts timestamp, current float, voltage int, phase float) tags(location binary(30), groupId int);
Query OK, 0 row(s) affected (0.008245s)
taos> show stables;
name | created_time | columns | tags | tables |
============================================================================================
meters | 2020-08-06 17:50:27.831 | 4 | 2 | 0 |
Query OK, 1 row(s) in set (0.001029s)
taos> show tables;
Query OK, 0 row(s) in set (0.000946s)
taos> insert into d1001 using meters tags('Beijing.Chaoyang', 2);
DB error: invalid SQL: keyword VALUES or FILE required
taos> show tables;
table_name | created_time | columns | stable_name |
======================================================================================================
d1001 | 2020-08-06 17:52:02.097 | 4 | meters |
Query OK, 1 row(s) in set (0.001091s)
```
#### SELECT子句 #### SELECT子句
一个选择子句可以是联合查询(UNION)和另一个查询的子查询(SUBQUERY)。 一个选择子句可以是联合查询(UNION)和另一个查询的子查询(SUBQUERY)。
...@@ -314,6 +338,7 @@ taos> SELECT * FROM meters; ...@@ -314,6 +338,7 @@ taos> SELECT * FROM meters;
Query OK, 9 row(s) in set (0.002022s) Query OK, 9 row(s) in set (0.002022s)
``` ```
通配符支持表名前缀,以下两个SQL语句均为返回全部的列: 通配符支持表名前缀,以下两个SQL语句均为返回全部的列:
```mysql ```mysql
SELECT * FROM d1001; SELECT * FROM d1001;
......
...@@ -5,20 +5,20 @@ ...@@ -5,20 +5,20 @@
# # # #
######################################################## ########################################################
# first full-qualified domain name (FQDN) for TDengine system # first fully qualified domain name (FQDN) for TDengine system
# firstEp hostname1:6030 # firstEp hostname1:6030
# second full-qualified domain name (FQDN) for TDengine system, for cluster edition only # second fully qualified domain name (FQDN) for TDengine system, for cluster only
# secondEp cluster_hostname2:6030 # secondEp cluster_hostname2:6030
# the full-qualified domain name (FQDN) of dnode # local fully qualified domain name (FQDN)
# fqdn hostname # fqdn hostname
# port for MNode connect to Client, default udp/tcp [6030-6040] # first port number for the connection (10 continuous UDP/TCP port number are used)
# serverPort 6030 # serverPort 6030
# http service port, default tcp [6020] # http service port, default tcp [6041]
# httpPort 6020 # httpPort 6041
# log file's directory # log file's directory
# logDir /var/log/taos # logDir /var/log/taos
...@@ -26,76 +26,73 @@ ...@@ -26,76 +26,73 @@
# data file's directory # data file's directory
# dataDir /var/lib/taos # dataDir /var/lib/taos
# the arbitrator's full-qualified domain name (FQDN) for TDengine system, for cluster edition only # the arbitrator's fully qualified domain name (FQDN) for TDengine system, for cluster only
# arbitrator arbitrator_hostname:6030 # arbitrator arbitrator_hostname:6030
# number of threads per CPU core # number of threads per CPU core
# numOfThreadsPerCore 1.0 # numOfThreadsPerCore 1.0
# the ratio of threads responsible for querying in the total thread
# ratioOfQueryThreads 0.5
# number of management nodes in the system # number of management nodes in the system
# numOfMnodes 3 # numOfMnodes 3
# if backup vnode directory when remove dnode # enable/disable backuping vnode directory when removing dnode
# vnodeBak 1 # vnodeBak 1
# Whether to start load balancing # enable/disable load balancing
# balance 1 # balance 1
# optional roles for dnode. 0 - any, 1 - mnode, 2 - dnode # role for dnode. 0 - any, 1 - mnode, 2 - dnode
# role 0 # role 0
# max timer control block # max timer control blocks
# maxTmrCtrl 512 # maxTmrCtrl 512
# interval of system monitor # time interval of system monitor, seconds
# monitorInterval 30 # monitorInterval 30
# number of seconds allowed for a dnode to be offline, for cluster version only # number of seconds allowed for a dnode to be offline, for cluster only
# offlineThreshold 8640000 # offlineThreshold 8640000
# RPC re-try timer, millisecond # RPC re-try timer, millisecond
# rpcTimer 300 # rpcTimer 300
# RPC maximum time for ack, seconds # RPC maximum time for ack, seconds.
# rpcMaxTime 600 # rpcMaxTime 600
# interval of DNode report status to MNode, unit is Second, for cluster version only # time interval of dnode status reporting to mnode, seconds, for cluster only
# statusInterval 1 # statusInterval 1
# interval of Shell send HB to MNode, unit is Second # time interval of heart beat from shell to dnode, seconds
# shellActivityTimer 3 # shellActivityTimer 3
# duration of to keep tableMeta kept in Cache, seconds # time of keeping table meta data in cache, seconds
# tableMetaKeepTimer 7200 # tableMetaKeepTimer 7200
# Minimum sliding window time # minimum sliding window time, milli-second
# minSlidingTime 10 # minSlidingTime 10
# Time window minimum # minimum time window, milli-second
# minIntervalTime 10 # minIntervalTime 10
# the max allowed delayed time for launching continuous query. 20ms by default # maximum delay before launching a stream computation, milli-second
# maxStreamCompDelay 20000 # maxStreamCompDelay 20000
# The minimum time to wait before the first stream execution # maximum delay before launching a stream computation for the first time, milli-second
# maxFirstStreamCompDelay 10000 # maxFirstStreamCompDelay 10000
# Retry wait time benchmark # retry delay when a stream computation fails, milli-second
# retryStreamCompDelay 10 # retryStreamCompDelay 10
# the delayed time for launching each continuous query. 10% of the whole computing time window by default. # the delayed time for launching a stream computation, from 0.1(default, 10% of whole computing time window) to 0.9
# streamCompDelayRatio 0.1 # streamCompDelayRatio 0.1
# max number of vgroups per db # max number of vgroups per db, 0 means configured automatically
# maxVgroupsPerDb 0 # maxVgroupsPerDb 0
# max number of tables per vnode # max number of tables per vnode
# maxTablesPerVnode 1000000 # maxTablesPerVnode 1000000
# Step size of increasing table number in vnode # step size of increasing table number in a vnode
# tableIncStepPerVnode 1000 # tableIncStepPerVnode 1000
# cache block size (Mbyte) # cache block size (Mbyte)
...@@ -110,22 +107,22 @@ ...@@ -110,22 +107,22 @@
# number of days to keep DB file # number of days to keep DB file
# keep 3650 # keep 3650
# min row of records in file block # minimum rows of records in file block
# minRows 100 # minRows 100
# max row of records in file block # maximum rows of records in file block
# maxRows 4096 # maxRows 4096
# enable/disable compression # enable/disable compression
# comp 2 # comp 2
# set write ahead log (WAL) level # write ahead log (WAL) level, 0: no wal; 1: write wal, but no fsync; 2: write wal, and call fsync
# walLevel 1 # walLevel 1
# When walLevel is set to 2, the cycle of fsync is executed # if walLevel is set to 2, the cycle of fsync being executed, if set to 0, fsync is called right away
# fsync 3000 # fsync 3000
# number of replications, for cluster version only # number of replications, for cluster only
# replica 1 # replica 1
# mqtt uri # mqtt uri
...@@ -143,7 +140,7 @@ ...@@ -143,7 +140,7 @@
# max length of an SQL # max length of an SQL
# maxSQLLength 65480 # maxSQLLength 65480
# Support the maximum number of records allowed for super table time sorting # the maximum number of records allowed for super table time sorting
# maxNumOfOrderedRes 100000 # maxNumOfOrderedRes 100000
# system time zone # system time zone
...@@ -155,31 +152,31 @@ ...@@ -155,31 +152,31 @@
# default system charset # default system charset
# charset UTF-8 # charset UTF-8
# max number of connections from client for dnode # max number of connections allowed in dnode
# maxShellConns 5000 # maxShellConns 5000
# max number of connections to one database # max number of connections allowed in client
# maxConnections 5000 # maxConnections 5000
# Stop writing logs when the disk size of the log folder is less than this value # stop writing logs when the disk size of the log folder is less than this value
# minimalLogDirGB 0.1 # minimalLogDirGB 0.1
# Stop writing temporary files when the disk size of the log folder is less than this value # stop writing temporary files when the disk size of the log folder is less than this value
# minimalTmpDirGB 0.1 # minimalTmpDirGB 0.1
# Stop writing data when the disk size of the log folder is less than this value # stop writing data when the disk size of the log folder is less than this value
# minimalDataDirGB 0.1 # minimalDataDirGB 0.1
# start http service # enable/disable http service
# http 1 # http 1
# start mqtt service # enable/disable mqtt service
# mqtt 0 # mqtt 0
# start system monitor module # enable/disable system monitor
# monitor 1 # monitor 1
# Record the SQL through restful interface # enable/disable recording the SQL statements via restful interface
# httpEnableRecordSql 0 # httpEnableRecordSql 0
# number of threads used to process http requests # number of threads used to process http requests
...@@ -255,11 +252,12 @@ ...@@ -255,11 +252,12 @@
# debug flag for http server # debug flag for http server
# tsdbDebugFlag 131 # tsdbDebugFlag 131
# Record the SQL in taos client # enable/disable recording the SQL in taos client
# tscEnableRecordSql 0 # tscEnableRecordSql 0
# if generate core file when service crash # generate core file when service crash
# enableCoreFile 1 # enableCoreFile 1
# The maximum display width of binary and nchar fields in the shell. The parts exceeding this limit will be hidden # maximum display width of binary and nchar fields in the shell. The parts exceeding this limit will be hidden
# maxBinaryDisplayWidth 30 # maxBinaryDisplayWidth 30
...@@ -42,10 +42,10 @@ extern char configDir[]; ...@@ -42,10 +42,10 @@ extern char configDir[];
#define BUFFER_SIZE 65536 #define BUFFER_SIZE 65536
#define MAX_DB_NAME_SIZE 64 #define MAX_DB_NAME_SIZE 64
#define MAX_TB_NAME_SIZE 64 #define MAX_TB_NAME_SIZE 64
#define MAX_DATA_SIZE 1024 #define MAX_DATA_SIZE 16000
#define MAX_NUM_DATATYPE 30 #define MAX_NUM_DATATYPE 10
#define OPT_ABORT 1 /* –abort */ #define OPT_ABORT 1 /* –abort */
#define STRING_LEN 512 #define STRING_LEN 60000
#define MAX_PREPARED_RAND 1000000 #define MAX_PREPARED_RAND 1000000
/* The options we understand. */ /* The options we understand. */
...@@ -155,7 +155,7 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { ...@@ -155,7 +155,7 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
strcasecmp(arg, "TINYINT") != 0 && strcasecmp(arg, "BOOL") != 0 && strcasecmp(arg, "TINYINT") != 0 && strcasecmp(arg, "BOOL") != 0 &&
strcasecmp(arg, "SMALLINT") != 0 && strcasecmp(arg, "SMALLINT") != 0 &&
strcasecmp(arg, "BIGINT") != 0 && strcasecmp(arg, "DOUBLE") != 0 && strcasecmp(arg, "BIGINT") != 0 && strcasecmp(arg, "DOUBLE") != 0 &&
strcasecmp(arg, "BINARY")) { strcasecmp(arg, "BINARY") && strcasecmp(arg, "NCHAR")) {
argp_error(state, "Invalid data_type!"); argp_error(state, "Invalid data_type!");
} }
sptr[0] = arg; sptr[0] = arg;
...@@ -171,11 +171,11 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { ...@@ -171,11 +171,11 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
strcasecmp(token, "BOOL") != 0 && strcasecmp(token, "BOOL") != 0 &&
strcasecmp(token, "SMALLINT") != 0 && strcasecmp(token, "SMALLINT") != 0 &&
strcasecmp(token, "BIGINT") != 0 && strcasecmp(token, "BIGINT") != 0 &&
strcasecmp(token, "DOUBLE") != 0 && strcasecmp(token, "BINARY")) { strcasecmp(token, "DOUBLE") != 0 && strcasecmp(token, "BINARY") && strcasecmp(token, "NCHAR")) {
argp_error(state, "Invalid data_type!"); argp_error(state, "Invalid data_type!");
} }
sptr[index++] = token; sptr[index++] = token;
token = strsep(&running, ", "); token = strsep(&running, ",");
if (index >= MAX_NUM_DATATYPE) break; if (index >= MAX_NUM_DATATYPE) break;
} }
} }
...@@ -412,7 +412,7 @@ int main(int argc, char *argv[]) { ...@@ -412,7 +412,7 @@ int main(int argc, char *argv[]) {
memset(dataString, 0, STRING_LEN); memset(dataString, 0, STRING_LEN);
int len = 0; int len = 0;
if (strcasecmp(data_type[0], "BINARY") == 0 || strcasecmp(data_type[0], "BOOL") == 0) { if (strcasecmp(data_type[0], "BINARY") == 0 || strcasecmp(data_type[0], "BOOL") == 0 || strcasecmp(data_type[0], "NCHAR") == 0 ) {
do_aggreFunc = false; do_aggreFunc = false;
} }
for (; count_data_type <= MAX_NUM_DATATYPE; count_data_type++) { for (; count_data_type <= MAX_NUM_DATATYPE; count_data_type++) {
...@@ -438,7 +438,7 @@ int main(int argc, char *argv[]) { ...@@ -438,7 +438,7 @@ int main(int argc, char *argv[]) {
printf("# Use metric: %s\n", use_metric ? "true" : "false"); printf("# Use metric: %s\n", use_metric ? "true" : "false");
printf("# Datatype of Columns: %s\n", dataString); printf("# Datatype of Columns: %s\n", dataString);
printf("# Binary Length(If applicable): %d\n", printf("# Binary Length(If applicable): %d\n",
(strcasestr(dataString, "BINARY") != NULL) ? len_of_binary : -1); (strcasestr(dataString, "BINARY") != NULL || strcasestr(dataString, "NCHAR") != NULL ) ? len_of_binary : -1);
printf("# Number of Columns per record: %d\n", ncols_per_record); printf("# Number of Columns per record: %d\n", ncols_per_record);
printf("# Number of Threads: %d\n", threads); printf("# Number of Threads: %d\n", threads);
printf("# Number of Tables: %d\n", ntables); printf("# Number of Tables: %d\n", ntables);
...@@ -466,7 +466,7 @@ int main(int argc, char *argv[]) { ...@@ -466,7 +466,7 @@ int main(int argc, char *argv[]) {
fprintf(fp, "# Use metric: %s\n", use_metric ? "true" : "false"); fprintf(fp, "# Use metric: %s\n", use_metric ? "true" : "false");
fprintf(fp, "# Datatype of Columns: %s\n", dataString); fprintf(fp, "# Datatype of Columns: %s\n", dataString);
fprintf(fp, "# Binary Length(If applicable): %d\n", fprintf(fp, "# Binary Length(If applicable): %d\n",
(strcasestr(dataString, "BINARY") != NULL) ? len_of_binary : -1); (strcasestr(dataString, "BINARY") != NULL || strcasestr(dataString, "NCHAR") != NULL ) ? len_of_binary : -1);
fprintf(fp, "# Number of Columns per record: %d\n", ncols_per_record); fprintf(fp, "# Number of Columns per record: %d\n", ncols_per_record);
fprintf(fp, "# Number of Threads: %d\n", threads); fprintf(fp, "# Number of Threads: %d\n", threads);
fprintf(fp, "# Number of Tables: %d\n", ntables); fprintf(fp, "# Number of Tables: %d\n", ntables);
...@@ -506,23 +506,23 @@ int main(int argc, char *argv[]) { ...@@ -506,23 +506,23 @@ int main(int argc, char *argv[]) {
len = 0; len = 0;
for (; colIndex < ncols_per_record - 1; colIndex++) { for (; colIndex < ncols_per_record - 1; colIndex++) {
if (strcasecmp(data_type[colIndex % count_data_type], "BINARY") != 0) { if (strcasecmp(data_type[colIndex % count_data_type], "BINARY") != 0 && strcasecmp(data_type[colIndex % count_data_type], "NCHAR") != 0) {
len += snprintf(cols + len, STRING_LEN - len, ",f%d %s", colIndex + 1, data_type[colIndex % count_data_type]); len += snprintf(cols + len, STRING_LEN - len, ",f%d %s", colIndex + 1, data_type[colIndex % count_data_type]);
} else { } else {
len += snprintf(cols + len, STRING_LEN - len, ",f%d %s(%d)", colIndex + 1, data_type[colIndex % count_data_type], len_of_binary); len += snprintf(cols + len, STRING_LEN - len, ",f%d %s(%d)", colIndex + 1, data_type[colIndex % count_data_type], len_of_binary);
} }
} }
if (strcasecmp(data_type[colIndex % count_data_type], "BINARY") != 0) { if (strcasecmp(data_type[colIndex % count_data_type], "BINARY") != 0 && strcasecmp(data_type[colIndex % count_data_type], "NCHAR") != 0){
len += snprintf(cols + len, STRING_LEN - len, ",f%d %s)", colIndex + 1, data_type[colIndex % count_data_type]); len += snprintf(cols + len, STRING_LEN - len, ",f%d %s", colIndex + 1, data_type[colIndex % count_data_type]);
} else { } else {
len += snprintf(cols + len, STRING_LEN - len, ",f%d %s(%d))", colIndex + 1, data_type[colIndex % count_data_type], len_of_binary); len += snprintf(cols + len, STRING_LEN - len, ",f%d %s(%d)", colIndex + 1, data_type[colIndex % count_data_type], len_of_binary);
} }
if (use_metric) { if (use_metric) {
/* Create metric table */ /* Create metric table */
printf("Creating meters super table...\n"); printf("Creating meters super table...\n");
snprintf(command, BUFFER_SIZE, "create table if not exists %s.meters (ts timestamp%s tags (areaid int, loc binary(10))", db_name, cols); snprintf(command, BUFFER_SIZE, "create table if not exists %s.meters (ts timestamp%s) tags (areaid int, loc binary(10))", db_name, cols);
queryDB(taos, command); queryDB(taos, command);
printf("meters created!\n"); printf("meters created!\n");
} }
...@@ -1272,6 +1272,10 @@ int32_t generateData(char *res, char **data_type, int num_of_cols, int64_t times ...@@ -1272,6 +1272,10 @@ int32_t generateData(char *res, char **data_type, int num_of_cols, int64_t times
char s[len_of_binary]; char s[len_of_binary];
rand_string(s, len_of_binary); rand_string(s, len_of_binary);
pstr += sprintf(pstr, ", \"%s\"", s); pstr += sprintf(pstr, ", \"%s\"", s);
}else if (strcasecmp(data_type[i % c], "nchar") == 0) {
char s[len_of_binary];
rand_string(s, len_of_binary);
pstr += sprintf(pstr, ", \"%s\"", s);
} }
if (pstr - res > MAX_DATA_SIZE) { if (pstr - res > MAX_DATA_SIZE) {
......
...@@ -643,7 +643,7 @@ class DbConn: ...@@ -643,7 +643,7 @@ class DbConn:
self.execute("use {}".format(dbName)) self.execute("use {}".format(dbName))
def hasDatabases(self): def hasDatabases(self):
return self.query("show databases") > 0 return self.query("show databases") > 1 # We now have a "log" database by default
def hasTables(self): def hasTables(self):
return self.query("show tables") > 0 return self.query("show tables") > 0
...@@ -850,6 +850,7 @@ class DbConnNative(DbConn): ...@@ -850,6 +850,7 @@ class DbConnNative(DbConn):
raise RuntimeError( raise RuntimeError(
"Cannot execute database commands until connection is open") "Cannot execute database commands until connection is open")
logger.debug("[SQL] Executing SQL: {}".format(sql)) logger.debug("[SQL] Executing SQL: {}".format(sql))
self._lastSql = sql
nRows = self._tdSql.execute(sql) nRows = self._tdSql.execute(sql)
logger.debug( logger.debug(
"[SQL] Execution Result, nRows = {}, SQL = {}".format( "[SQL] Execution Result, nRows = {}, SQL = {}".format(
...@@ -861,6 +862,7 @@ class DbConnNative(DbConn): ...@@ -861,6 +862,7 @@ class DbConnNative(DbConn):
raise RuntimeError( raise RuntimeError(
"Cannot query database until connection is open") "Cannot query database until connection is open")
logger.debug("[SQL] Executing SQL: {}".format(sql)) logger.debug("[SQL] Executing SQL: {}".format(sql))
self._lastSql = sql
nRows = self._tdSql.query(sql) nRows = self._tdSql.query(sql)
logger.debug( logger.debug(
"[SQL] Query Result, nRows = {}, SQL = {}".format( "[SQL] Query Result, nRows = {}, SQL = {}".format(
...@@ -1771,6 +1773,9 @@ class TdSuperTable: ...@@ -1771,6 +1773,9 @@ class TdSuperTable:
def __init__(self, stName): def __init__(self, stName):
self._stName = stName self._stName = stName
def getName(self):
return self._stName
def create(self, dbc, cols: dict, tags: dict): def create(self, dbc, cols: dict, tags: dict):
sql = "CREATE TABLE db.{} ({}) TAGS ({})".format( sql = "CREATE TABLE db.{} ({}) TAGS ({})".format(
self._stName, self._stName,
...@@ -1864,16 +1869,29 @@ class TaskReadData(StateTransitionTask): ...@@ -1864,16 +1869,29 @@ class TaskReadData(StateTransitionTask):
wt.getDbConn().close() wt.getDbConn().close()
wt.getDbConn().open() wt.getDbConn().open()
for rTbName in sTable.getRegTables(wt.getDbConn()): # regular tables dbc = wt.getDbConn()
aggExpr = Dice.choice(['*', 'count(*)', 'avg(speed)', for rTbName in sTable.getRegTables(dbc): # regular tables
aggExpr = Dice.choice([
'*',
'count(*)',
'avg(speed)',
# 'twa(speed)', # TODO: this one REQUIRES a where statement, not reasonable # 'twa(speed)', # TODO: this one REQUIRES a where statement, not reasonable
'sum(speed)', 'stddev(speed)', 'sum(speed)',
'min(speed)', 'max(speed)', 'first(speed)', 'last(speed)']) # TODO: add more from 'top' 'stddev(speed)',
'min(speed)',
'max(speed)',
'first(speed)',
'last(speed)']) # TODO: add more from 'top'
filterExpr = Dice.choice([ # TODO: add various kind of WHERE conditions
None
])
try: try:
self.execWtSql(wt, "select {} from db.{}".format(aggExpr, rTbName)) dbc.execute("select {} from db.{}".format(aggExpr, rTbName))
if aggExpr not in ['stddev(speed)']: #TODO: STDDEV not valid for super tables?!
dbc.execute("select {} from db.{}".format(aggExpr, sTable.getName()))
except taos.error.ProgrammingError as err: except taos.error.ProgrammingError as err:
errno2 = err.errno if (err.errno > 0) else 0x80000000 + err.errno errno2 = err.errno if (err.errno > 0) else 0x80000000 + err.errno
logger.debug("[=] Read Failure: errno=0x{:X}, msg: {}, SQL: {}".format(errno2, err, wt.getDbConn().getLastSql())) logger.debug("[=] Read Failure: errno=0x{:X}, msg: {}, SQL: {}".format(errno2, err, dbc.getLastSql()))
raise raise
class TaskDropSuperTable(StateTransitionTask): class TaskDropSuperTable(StateTransitionTask):
...@@ -2204,8 +2222,8 @@ class SvcManager: ...@@ -2204,8 +2222,8 @@ class SvcManager:
# print("Process: {}".format(proc.name())) # print("Process: {}".format(proc.name()))
self.svcMgrThread = ServiceManagerThread() # create the object self.svcMgrThread = ServiceManagerThread() # create the object
self.svcMgrThread.start()
print("Attempting to start TAOS service started, printing out output...") print("Attempting to start TAOS service started, printing out output...")
self.svcMgrThread.start()
self.svcMgrThread.procIpcBatch( self.svcMgrThread.procIpcBatch(
trimToTarget=10, trimToTarget=10,
forceOutput=True) # for printing 10 lines forceOutput=True) # for printing 10 lines
...@@ -2222,8 +2240,8 @@ class SvcManager: ...@@ -2222,8 +2240,8 @@ class SvcManager:
if self.svcMgrThread.isStopped(): if self.svcMgrThread.isStopped():
self.svcMgrThread.procIpcBatch(outputLines) # one last time self.svcMgrThread.procIpcBatch(outputLines) # one last time
self.svcMgrThread = None self.svcMgrThread = None
print("----- End of TDengine Service Output -----\n") print("End of TDengine Service Output")
print("SMT execution terminated") print("----- TDengine Service (managed by SMT) is now terminated -----\n")
else: else:
print("WARNING: SMT did not terminate as expected") print("WARNING: SMT did not terminate as expected")
...@@ -2330,6 +2348,8 @@ class ServiceManagerThread: ...@@ -2330,6 +2348,8 @@ class ServiceManagerThread:
self._status = MainExec.STATUS_STOPPING self._status = MainExec.STATUS_STOPPING
retCode = self._tdeSubProcess.stop() retCode = self._tdeSubProcess.stop()
print("Attempted to stop sub process, got return code: {}".format(retCode)) print("Attempted to stop sub process, got return code: {}".format(retCode))
if (retCode==-11): # SGV
logger.error("[[--ERROR--]]: TDengine service SEGV fault (check core file!)")
if self._tdeSubProcess.isRunning(): # still running if self._tdeSubProcess.isRunning(): # still running
print("FAILED to stop sub process, it is still running... pid = {}".format( print("FAILED to stop sub process, it is still running... pid = {}".format(
...@@ -2624,12 +2644,12 @@ class ClientManager: ...@@ -2624,12 +2644,12 @@ class ClientManager:
def _printLastNumbers(self): # to verify data durability def _printLastNumbers(self): # to verify data durability
dbManager = DbManager(resetDb=False) dbManager = DbManager(resetDb=False)
dbc = dbManager.getDbConn() dbc = dbManager.getDbConn()
if dbc.query("show databases") == 0: # no databae if dbc.query("show databases") <= 1: # no database (we have a default called "log")
return return
dbc.execute("use db")
if dbc.query("show tables") == 0: # no tables if dbc.query("show tables") == 0: # no tables
return return
dbc.execute("use db")
sTbName = dbManager.getFixedSuperTableName() sTbName = dbManager.getFixedSuperTableName()
# get all regular tables # get all regular tables
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册