Commit 3539dd2b authored by Hongze Cheng

Merge branch 'develop' into hotfix/sync_loss_data

@@ -281,6 +281,30 @@ SELECT select_expr [, select_expr ...]
[LIMIT limit_val [, OFFSET offset_val]]
[>> export_file]
```
Note: for INSERT statements we use a streaming parsing strategy, so the correct leading part of the SQL is still executed before a later error is detected. In the SQL below, the INSERT statement is invalid, but the table d1001 is still created.
```mysql
taos> create table meters(ts timestamp, current float, voltage int, phase float) tags(location binary(30), groupId int);
Query OK, 0 row(s) affected (0.008245s)
taos> show stables;
name | created_time | columns | tags | tables |
============================================================================================
meters | 2020-08-06 17:50:27.831 | 4 | 2 | 0 |
Query OK, 1 row(s) in set (0.001029s)
taos> show tables;
Query OK, 0 row(s) in set (0.000946s)
taos> insert into d1001 using meters tags('Beijing.Chaoyang', 2);
DB error: invalid SQL: keyword VALUES or FILE required
taos> show tables;
table_name | created_time | columns | stable_name |
======================================================================================================
d1001 | 2020-08-06 17:52:02.097 | 4 | meters |
Query OK, 1 row(s) in set (0.001091s)
```
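Since d1001 now exists despite the failed statement, a subsequent complete INSERT succeeds against it. A minimal sketch (the values are illustrative, matching the meters schema defined above):
```mysql
INSERT INTO d1001 VALUES (NOW, 10.2, 219, 0.32);
```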
#### SELECT Clause
A SELECT clause can be a union query (UNION) or a subquery (SUBQUERY) of another query.
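For example, a union of two sub-table queries might look as follows (a sketch; d1002 is a hypothetical sibling table of d1001 with the same schema, and UNION ALL support is assumed):
```mysql
SELECT ts, current FROM d1001
UNION ALL
SELECT ts, current FROM d1002;
```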
@@ -314,6 +338,7 @@ taos> SELECT * FROM meters;
Query OK, 9 row(s) in set (0.002022s)
```
The wildcard supports table name prefixes; the following two SQL statements both return all columns:
```mysql
SELECT * FROM d1001;
```
......
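The elided second statement is presumably the prefixed-wildcard form (an assumption based on the sentence above; it is not shown in this diff):
```mysql
SELECT d1001.* FROM d1001;
```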
@@ -5,20 +5,20 @@
# #
########################################################
# first full-qualified domain name (FQDN) for TDengine system
# first fully qualified domain name (FQDN) for TDengine system
# firstEp hostname1:6030
# second full-qualified domain name (FQDN) for TDengine system, for cluster edition only
# second fully qualified domain name (FQDN) for TDengine system, for cluster only
# secondEp cluster_hostname2:6030
# the full-qualified domain name (FQDN) of dnode
# local fully qualified domain name (FQDN)
# fqdn hostname
# port for MNode connect to Client, default udp/tcp [6030-6040]
# first port number for the connection (10 consecutive UDP/TCP port numbers are used)
# serverPort 6030
# http service port, default tcp [6020]
# httpPort 6020
# http service port, default tcp [6041]
# httpPort 6041
# log file's directory
# logDir /var/log/taos
@@ -26,76 +26,73 @@
# data file's directory
# dataDir /var/lib/taos
# the arbitrator's full-qualified domain name (FQDN) for TDengine system, for cluster edition only
# the arbitrator's fully qualified domain name (FQDN) for TDengine system, for cluster only
# arbitrator arbitrator_hostname:6030
# number of threads per CPU core
# numOfThreadsPerCore 1.0
# ratio of threads responsible for querying to the total number of threads
# ratioOfQueryThreads 0.5
# number of management nodes in the system
# numOfMnodes 3
# if backup vnode directory when remove dnode
# enable/disable backing up the vnode directory when removing a dnode
# vnodeBak 1
# Whether to start load balancing
# enable/disable load balancing
# balance 1
# optional roles for dnode. 0 - any, 1 - mnode, 2 - dnode
# role for dnode. 0 - any, 1 - mnode, 2 - dnode
# role 0
# max timer control block
# max timer control blocks
# maxTmrCtrl 512
# interval of system monitor
# time interval of system monitor, seconds
# monitorInterval 30
# number of seconds allowed for a dnode to be offline, for cluster version only
# number of seconds allowed for a dnode to be offline, for cluster only
# offlineThreshold 8640000
# RPC re-try timer, millisecond
# rpcTimer 300
# RPC maximum time for ack, seconds
# RPC maximum time for ack, seconds.
# rpcMaxTime 600
# interval of DNode report status to MNode, unit is Second, for cluster version only
# time interval of dnode status reporting to mnode, seconds, for cluster only
# statusInterval 1
# interval of Shell send HB to MNode, unit is Second
# time interval of heartbeat from shell to dnode, seconds
# shellActivityTimer 3
# duration of to keep tableMeta kept in Cache, seconds
# time of keeping table meta data in cache, seconds
# tableMetaKeepTimer 7200
# Minimum sliding window time
# minimum sliding window time, milli-second
# minSlidingTime 10
# Time window minimum
# minimum time window, milli-second
# minIntervalTime 10
# the max allowed delayed time for launching continuous query. 20ms by default
# maximum delay before launching a stream computation, milli-second
# maxStreamCompDelay 20000
# The minimum time to wait before the first stream execution
# maximum delay before launching a stream computation for the first time, milli-second
# maxFirstStreamCompDelay 10000
# Retry wait time benchmark
# retry delay when a stream computation fails, milli-second
# retryStreamCompDelay 10
# the delayed time for launching each continuous query. 10% of the whole computing time window by default.
# the delayed time for launching a stream computation, from 0.1(default, 10% of whole computing time window) to 0.9
# streamCompDelayRatio 0.1
# max number of vgroups per db
# max number of vgroups per db, 0 means configured automatically
# maxVgroupsPerDb 0
# max number of tables per vnode
# maxTablesPerVnode 1000000
# Step size of increasing table number in vnode
# step size of increasing table number in a vnode
# tableIncStepPerVnode 1000
# cache block size (Mbyte)
@@ -110,22 +107,22 @@
# number of days to keep DB file
# keep 3650
# min row of records in file block
# minimum rows of records in file block
# minRows 100
# max row of records in file block
# maximum rows of records in file block
# maxRows 4096
# enable/disable compression
# comp 2
# set write ahead log (WAL) level
# write ahead log (WAL) level, 0: no wal; 1: write wal, but no fsync; 2: write wal, and call fsync
# walLevel 1
# When walLevel is set to 2, the cycle of fsync is executed
# if walLevel is set to 2, the interval at which fsync is executed, milli-second; if set to 0, fsync is called right away
# fsync 3000
# number of replications, for cluster version only
# number of replications, for cluster only
# replica 1
# mqtt uri
@@ -143,7 +140,7 @@
# max length of an SQL
# maxSQLLength 65480
# Support the maximum number of records allowed for super table time sorting
# the maximum number of records allowed for super table time sorting
# maxNumOfOrderedRes 100000
# system time zone
@@ -155,31 +152,31 @@
# default system charset
# charset UTF-8
# max number of connections from client for dnode
# max number of connections allowed in dnode
# maxShellConns 5000
# max numerber of connections to one database
# max number of connections allowed in client
# maxConnections 5000
# Stop writing logs when the disk size of the log folder is less than this value
# stop writing logs when the disk size of the log folder is less than this value
# minimalLogDirGB 0.1
# Stop writing temporary files when the disk size of the log folder is less than this value
# stop writing temporary files when the disk size of the tmp folder is less than this value
# minimalTmpDirGB 0.1
# Stop writing data when the disk size of the log folder is less than this value
# stop writing data when the disk size of the data folder is less than this value
# minimalDataDirGB 0.1
# start http service
# enable/disable http service
# http 1
# start muqq service
# enable/disable mqtt service
# mqtt 0
# start system monitor module
# enable/disable system monitor
# monitor 1
# Record the SQL through restful interface
# enable/disable recording the SQL statements via restful interface
# httpEnableRecordSql 0
# number of threads used to process http requests
@@ -255,11 +252,12 @@
# debug flag for http server
# tsdbDebugFlag 131
# Record the SQL in taos client
# enable/disable recording the SQL in taos client
# tscEnableRecordSql 0
# if generate core file when service crash
# generate core file when service crashes
# enableCoreFile 1
# The maximum display width of binary and nchar fields in the shell. The parts exceeding this limit will be hidden
# maximum display width of binary and nchar fields in the shell. The parts exceeding this limit will be hidden
# maxBinaryDisplayWidth 30
@@ -42,10 +42,10 @@ extern char configDir[];
#define BUFFER_SIZE 65536
#define MAX_DB_NAME_SIZE 64
#define MAX_TB_NAME_SIZE 64
#define MAX_DATA_SIZE 1024
#define MAX_NUM_DATATYPE 30
#define MAX_DATA_SIZE 16000
#define MAX_NUM_DATATYPE 10
#define OPT_ABORT 1 /* --abort */
#define STRING_LEN 512
#define STRING_LEN 60000
#define MAX_PREPARED_RAND 1000000
/* The options we understand. */
@@ -155,7 +155,7 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
strcasecmp(arg, "TINYINT") != 0 && strcasecmp(arg, "BOOL") != 0 &&
strcasecmp(arg, "SMALLINT") != 0 &&
strcasecmp(arg, "BIGINT") != 0 && strcasecmp(arg, "DOUBLE") != 0 &&
strcasecmp(arg, "BINARY")) {
strcasecmp(arg, "BINARY") && strcasecmp(arg, "NCHAR")) {
argp_error(state, "Invalid data_type!");
}
sptr[0] = arg;
@@ -171,11 +171,11 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
strcasecmp(token, "BOOL") != 0 &&
strcasecmp(token, "SMALLINT") != 0 &&
strcasecmp(token, "BIGINT") != 0 &&
strcasecmp(token, "DOUBLE") != 0 && strcasecmp(token, "BINARY")) {
strcasecmp(token, "DOUBLE") != 0 && strcasecmp(token, "BINARY") && strcasecmp(token, "NCHAR")) {
argp_error(state, "Invalid data_type!");
}
sptr[index++] = token;
token = strsep(&running, ", ");
token = strsep(&running, ",");
if (index >= MAX_NUM_DATATYPE) break;
}
}
@@ -412,7 +412,7 @@ int main(int argc, char *argv[]) {
memset(dataString, 0, STRING_LEN);
int len = 0;
if (strcasecmp(data_type[0], "BINARY") == 0 || strcasecmp(data_type[0], "BOOL") == 0) {
if (strcasecmp(data_type[0], "BINARY") == 0 || strcasecmp(data_type[0], "BOOL") == 0 || strcasecmp(data_type[0], "NCHAR") == 0 ) {
do_aggreFunc = false;
}
for (; count_data_type <= MAX_NUM_DATATYPE; count_data_type++) {
@@ -438,7 +438,7 @@ int main(int argc, char *argv[]) {
printf("# Use metric: %s\n", use_metric ? "true" : "false");
printf("# Datatype of Columns: %s\n", dataString);
printf("# Binary Length(If applicable): %d\n",
(strcasestr(dataString, "BINARY") != NULL) ? len_of_binary : -1);
(strcasestr(dataString, "BINARY") != NULL || strcasestr(dataString, "NCHAR") != NULL ) ? len_of_binary : -1);
printf("# Number of Columns per record: %d\n", ncols_per_record);
printf("# Number of Threads: %d\n", threads);
printf("# Number of Tables: %d\n", ntables);
@@ -466,7 +466,7 @@ int main(int argc, char *argv[]) {
fprintf(fp, "# Use metric: %s\n", use_metric ? "true" : "false");
fprintf(fp, "# Datatype of Columns: %s\n", dataString);
fprintf(fp, "# Binary Length(If applicable): %d\n",
(strcasestr(dataString, "BINARY") != NULL) ? len_of_binary : -1);
(strcasestr(dataString, "BINARY") != NULL || strcasestr(dataString, "NCHAR") != NULL ) ? len_of_binary : -1);
fprintf(fp, "# Number of Columns per record: %d\n", ncols_per_record);
fprintf(fp, "# Number of Threads: %d\n", threads);
fprintf(fp, "# Number of Tables: %d\n", ntables);
@@ -506,23 +506,23 @@ int main(int argc, char *argv[]) {
len = 0;
for (; colIndex < ncols_per_record - 1; colIndex++) {
if (strcasecmp(data_type[colIndex % count_data_type], "BINARY") != 0) {
if (strcasecmp(data_type[colIndex % count_data_type], "BINARY") != 0 && strcasecmp(data_type[colIndex % count_data_type], "NCHAR") != 0) {
len += snprintf(cols + len, STRING_LEN - len, ",f%d %s", colIndex + 1, data_type[colIndex % count_data_type]);
} else {
len += snprintf(cols + len, STRING_LEN - len, ",f%d %s(%d)", colIndex + 1, data_type[colIndex % count_data_type], len_of_binary);
}
}
if (strcasecmp(data_type[colIndex % count_data_type], "BINARY") != 0) {
len += snprintf(cols + len, STRING_LEN - len, ",f%d %s)", colIndex + 1, data_type[colIndex % count_data_type]);
if (strcasecmp(data_type[colIndex % count_data_type], "BINARY") != 0 && strcasecmp(data_type[colIndex % count_data_type], "NCHAR") != 0) {
len += snprintf(cols + len, STRING_LEN - len, ",f%d %s", colIndex + 1, data_type[colIndex % count_data_type]);
} else {
len += snprintf(cols + len, STRING_LEN - len, ",f%d %s(%d))", colIndex + 1, data_type[colIndex % count_data_type], len_of_binary);
len += snprintf(cols + len, STRING_LEN - len, ",f%d %s(%d)", colIndex + 1, data_type[colIndex % count_data_type], len_of_binary);
}
if (use_metric) {
/* Create metric table */
printf("Creating meters super table...\n");
snprintf(command, BUFFER_SIZE, "create table if not exists %s.meters (ts timestamp%s tags (areaid int, loc binary(10))", db_name, cols);
snprintf(command, BUFFER_SIZE, "create table if not exists %s.meters (ts timestamp%s) tags (areaid int, loc binary(10))", db_name, cols);
queryDB(taos, command);
printf("meters created!\n");
}
@@ -1272,6 +1272,10 @@ int32_t generateData(char *res, char **data_type, int num_of_cols, int64_t times
char s[len_of_binary];
rand_string(s, len_of_binary);
pstr += sprintf(pstr, ", \"%s\"", s);
} else if (strcasecmp(data_type[i % c], "nchar") == 0) {
char s[len_of_binary];
rand_string(s, len_of_binary);
pstr += sprintf(pstr, ", \"%s\"", s);
}
if (pstr - res > MAX_DATA_SIZE) {
......
@@ -643,7 +643,7 @@ class DbConn:
self.execute("use {}".format(dbName))
def hasDatabases(self):
return self.query("show databases") > 0
return self.query("show databases") > 1 # We now have a "log" database by default
def hasTables(self):
return self.query("show tables") > 0
@@ -850,6 +850,7 @@ class DbConnNative(DbConn):
raise RuntimeError(
"Cannot execute database commands until connection is open")
logger.debug("[SQL] Executing SQL: {}".format(sql))
self._lastSql = sql
nRows = self._tdSql.execute(sql)
logger.debug(
"[SQL] Execution Result, nRows = {}, SQL = {}".format(
@@ -861,6 +862,7 @@ class DbConnNative(DbConn):
raise RuntimeError(
"Cannot query database until connection is open")
logger.debug("[SQL] Executing SQL: {}".format(sql))
self._lastSql = sql
nRows = self._tdSql.query(sql)
logger.debug(
"[SQL] Query Result, nRows = {}, SQL = {}".format(
@@ -1771,6 +1773,9 @@ class TdSuperTable:
def __init__(self, stName):
self._stName = stName
def getName(self):
return self._stName
def create(self, dbc, cols: dict, tags: dict):
sql = "CREATE TABLE db.{} ({}) TAGS ({})".format(
self._stName,
@@ -1864,16 +1869,29 @@ class TaskReadData(StateTransitionTask):
wt.getDbConn().close()
wt.getDbConn().open()
for rTbName in sTable.getRegTables(wt.getDbConn()): # regular tables
aggExpr = Dice.choice(['*', 'count(*)', 'avg(speed)',
dbc = wt.getDbConn()
for rTbName in sTable.getRegTables(dbc): # regular tables
aggExpr = Dice.choice([
'*',
'count(*)',
'avg(speed)',
# 'twa(speed)', # TODO: this one REQUIRES a where statement, not reasonable
'sum(speed)', 'stddev(speed)',
'min(speed)', 'max(speed)', 'first(speed)', 'last(speed)']) # TODO: add more from 'top'
'sum(speed)',
'stddev(speed)',
'min(speed)',
'max(speed)',
'first(speed)',
'last(speed)']) # TODO: add more from 'top'
filterExpr = Dice.choice([ # TODO: add various kind of WHERE conditions
None
])
try:
self.execWtSql(wt, "select {} from db.{}".format(aggExpr, rTbName))
dbc.execute("select {} from db.{}".format(aggExpr, rTbName))
if aggExpr not in ['stddev(speed)']: #TODO: STDDEV not valid for super tables?!
dbc.execute("select {} from db.{}".format(aggExpr, sTable.getName()))
except taos.error.ProgrammingError as err:
errno2 = err.errno if (err.errno > 0) else 0x80000000 + err.errno
logger.debug("[=] Read Failure: errno=0x{:X}, msg: {}, SQL: {}".format(errno2, err, wt.getDbConn().getLastSql()))
logger.debug("[=] Read Failure: errno=0x{:X}, msg: {}, SQL: {}".format(errno2, err, dbc.getLastSql()))
raise
class TaskDropSuperTable(StateTransitionTask):
@@ -2204,8 +2222,8 @@ class SvcManager:
# print("Process: {}".format(proc.name()))
self.svcMgrThread = ServiceManagerThread() # create the object
self.svcMgrThread.start()
print("Attempting to start TAOS service started, printing out output...")
self.svcMgrThread.start()
self.svcMgrThread.procIpcBatch(
trimToTarget=10,
forceOutput=True) # for printing 10 lines
@@ -2222,8 +2240,8 @@ class SvcManager:
if self.svcMgrThread.isStopped():
self.svcMgrThread.procIpcBatch(outputLines) # one last time
self.svcMgrThread = None
print("----- End of TDengine Service Output -----\n")
print("SMT execution terminated")
print("End of TDengine Service Output")
print("----- TDengine Service (managed by SMT) is now terminated -----\n")
else:
print("WARNING: SMT did not terminate as expected")
@@ -2330,6 +2348,8 @@ class ServiceManagerThread:
self._status = MainExec.STATUS_STOPPING
retCode = self._tdeSubProcess.stop()
print("Attempted to stop sub process, got return code: {}".format(retCode))
if (retCode==-11): # SEGV
logger.error("[[--ERROR--]]: TDengine service SEGV fault (check core file!)")
if self._tdeSubProcess.isRunning(): # still running
print("FAILED to stop sub process, it is still running... pid = {}".format(
@@ -2624,12 +2644,12 @@ class ClientManager:
def _printLastNumbers(self): # to verify data durability
dbManager = DbManager(resetDb=False)
dbc = dbManager.getDbConn()
if dbc.query("show databases") == 0: # no databae
if dbc.query("show databases") <= 1: # no database (we have a default called "log")
return
dbc.execute("use db")
if dbc.query("show tables") == 0: # no tables
return
dbc.execute("use db")
sTbName = dbManager.getFixedSuperTableName()
# get all regular tables
......