diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 964f1a1d0f3811f461d5e408a8cc456a205f5b98..2beb0c8e7efc3b0348d37264358fa4c64219de16 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -71,16 +71,15 @@ extern char configDir[]; #define HEAD_BUFF_LEN TSDB_MAX_COLUMNS*24 // 16*MAX_COLUMNS + (192+32)*2 + insert into .. -#define MAX_SQL_SIZE 65536 -#define BUFFER_SIZE (65536*2) -#define COND_BUF_LEN (BUFFER_SIZE - 30) +#define COL_BUFFER_LEN (TSDB_MAX_BYTES_PER_ROW - 50) +#define BUFFER_SIZE (50 + TSDB_DB_NAME_LEN + TSDB_TABLE_NAME_LEN + TSDB_MAX_BYTES_PER_ROW + TSDB_MAX_TAGS_LEN) +#define COND_BUF_LEN (BUFFER_SIZE - 30) #define MAX_USERNAME_SIZE 64 #define MAX_PASSWORD_SIZE 64 #define MAX_HOSTNAME_SIZE 64 #define MAX_TB_NAME_SIZE 64 #define MAX_DATA_SIZE (16*TSDB_MAX_COLUMNS)+20 // max record len: 16*MAX_COLUMNS, timestamp string and ,('') need extra space #define OPT_ABORT 1 /* –abort */ -#define STRING_LEN 60000 #define MAX_PREPARED_RAND 1000000 #define MAX_FILE_NAME_LEN 256 // max file name length on linux is 255. @@ -2707,7 +2706,7 @@ static int createSuperTable( char command[BUFFER_SIZE] = "\0"; - char cols[STRING_LEN] = "\0"; + char cols[COL_BUFFER_LEN] = "\0"; int colIndex; int len = 0; @@ -2723,55 +2722,55 @@ static int createSuperTable( char* dataType = superTbl->columns[colIndex].dataType; if (strcasecmp(dataType, "BINARY") == 0) { - len += snprintf(cols + len, STRING_LEN - len, - ", col%d %s(%d)", colIndex, "BINARY", + len += snprintf(cols + len, COL_BUFFER_LEN - len, + ", C%d %s(%d)", colIndex, "BINARY", superTbl->columns[colIndex].dataLen); lenOfOneRow += superTbl->columns[colIndex].dataLen + 3; } else if (strcasecmp(dataType, "NCHAR") == 0) { - len += snprintf(cols + len, STRING_LEN - len, - ", col%d %s(%d)", colIndex, "NCHAR", + len += snprintf(cols + len, COL_BUFFER_LEN - len, + ", C%d %s(%d)", colIndex, "NCHAR", superTbl->columns[colIndex].dataLen); lenOfOneRow += superTbl->columns[colIndex].dataLen + 3; } else if (strcasecmp(dataType, "INT") == 0) { if ((g_args.demo_mode) && (colIndex == 1)) { - len += snprintf(cols + len, STRING_LEN - len, + len += snprintf(cols + len, COL_BUFFER_LEN - len, ", VOLTAGE INT"); } else { - len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "INT"); + len += snprintf(cols + len, COL_BUFFER_LEN - len, ", C%d %s", colIndex, "INT"); } lenOfOneRow += 11; } else if (strcasecmp(dataType, "BIGINT") == 0) { - len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", + len += snprintf(cols + len, COL_BUFFER_LEN - len, ", C%d %s", colIndex, "BIGINT"); lenOfOneRow += 21; } else if (strcasecmp(dataType, "SMALLINT") == 0) { - len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", + len += snprintf(cols + len, COL_BUFFER_LEN - len, ", C%d %s", colIndex, "SMALLINT"); lenOfOneRow += 6; } else if (strcasecmp(dataType, "TINYINT") == 0) { - len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "TINYINT"); + len += snprintf(cols + len, COL_BUFFER_LEN - len, ", C%d %s", colIndex, "TINYINT"); lenOfOneRow += 4; } else if (strcasecmp(dataType, "BOOL") == 0) { - len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "BOOL"); + len += snprintf(cols + len, COL_BUFFER_LEN - len, ", C%d %s", colIndex, "BOOL"); lenOfOneRow += 6; } else if (strcasecmp(dataType, "FLOAT") == 0) { if (g_args.demo_mode) { if (colIndex == 0) { - len += snprintf(cols + len, STRING_LEN - len, ", CURRENT FLOAT"); + len += snprintf(cols + len, COL_BUFFER_LEN - len, ", CURRENT FLOAT"); } else if 
(colIndex == 2) { - len += snprintf(cols + len, STRING_LEN - len, ", PHASE FLOAT"); + len += snprintf(cols + len, COL_BUFFER_LEN - len, ", PHASE FLOAT"); } } else { - len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "FLOAT"); + len += snprintf(cols + len, COL_BUFFER_LEN - len, ", C%d %s", colIndex, "FLOAT"); } lenOfOneRow += 22; } else if (strcasecmp(dataType, "DOUBLE") == 0) { - len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", + len += snprintf(cols + len, COL_BUFFER_LEN - len, ", C%d %s", colIndex, "DOUBLE"); lenOfOneRow += 42; } else if (strcasecmp(dataType, "TIMESTAMP") == 0) { - len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", + len += snprintf(cols + len, COL_BUFFER_LEN - len, ", C%d %s", colIndex, "TIMESTAMP"); lenOfOneRow += 21; } else { @@ -2803,60 +2802,63 @@ static int createSuperTable( return -1; } - char tags[STRING_LEN] = "\0"; + char tags[TSDB_MAX_TAGS_LEN] = "\0"; int tagIndex; len = 0; int lenOfTagOfOneRow = 0; - len += snprintf(tags + len, STRING_LEN - len, "("); + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, "("); for (tagIndex = 0; tagIndex < superTbl->tagCount; tagIndex++) { char* dataType = superTbl->tags[tagIndex].dataType; if (strcasecmp(dataType, "BINARY") == 0) { if ((g_args.demo_mode) && (tagIndex == 1)) { - len += snprintf(tags + len, STRING_LEN - len, + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, "location BINARY(%d), ", superTbl->tags[tagIndex].dataLen); } else { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s(%d), ", - tagIndex, "BINARY", superTbl->tags[tagIndex].dataLen); + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "t%d %s(%d), ", tagIndex, "BINARY", + superTbl->tags[tagIndex].dataLen); } lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 3; } else if (strcasecmp(dataType, "NCHAR") == 0) { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s(%d), ", tagIndex, + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "t%d %s(%d), ", tagIndex, "NCHAR", superTbl->tags[tagIndex].dataLen); lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 3; } else if (strcasecmp(dataType, "INT") == 0) { if ((g_args.demo_mode) && (tagIndex == 0)) { - len += snprintf(tags + len, STRING_LEN - len, "groupId INT, "); + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "groupId INT, "); } else { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, - "INT"); + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "t%d %s, ", tagIndex, "INT"); } lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 11; } else if (strcasecmp(dataType, "BIGINT") == 0) { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, - "BIGINT"); + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "t%d %s, ", tagIndex, "BIGINT"); lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 21; } else if (strcasecmp(dataType, "SMALLINT") == 0) { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, - "SMALLINT"); + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "t%d %s, ", tagIndex, "SMALLINT"); lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 6; } else if (strcasecmp(dataType, "TINYINT") == 0) { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, - "TINYINT"); + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "t%d %s, ", tagIndex, "TINYINT"); lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 4; } else if (strcasecmp(dataType, "BOOL") == 0) { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, - "BOOL"); + len 
+= snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "t%d %s, ", tagIndex, "BOOL"); lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 6; } else if (strcasecmp(dataType, "FLOAT") == 0) { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, - "FLOAT"); + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "t%d %s, ", tagIndex, "FLOAT"); lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 22; } else if (strcasecmp(dataType, "DOUBLE") == 0) { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, - "DOUBLE"); + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "t%d %s, ", tagIndex, "DOUBLE"); lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 42; } else { taos_close(taos); @@ -2867,7 +2869,7 @@ static int createSuperTable( } len -= 2; - len += snprintf(tags + len, STRING_LEN - len, ")"); + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, ")"); superTbl->lenOfTagOfOneRow = lenOfTagOfOneRow; @@ -3020,175 +3022,175 @@ static int createDatabasesAndStables() { static void* createTable(void *sarg) { - threadInfo *pThreadInfo = (threadInfo *)sarg; - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + threadInfo *pThreadInfo = (threadInfo *)sarg; + SSuperTable* superTblInfo = pThreadInfo->superTblInfo; - uint64_t lastPrintTime = taosGetTimestampMs(); + uint64_t lastPrintTime = taosGetTimestampMs(); - int buff_len; - buff_len = BUFFER_SIZE / 8; + int buff_len; + buff_len = BUFFER_SIZE; - pThreadInfo->buffer = calloc(buff_len, 1); - if (pThreadInfo->buffer == NULL) { - errorPrint("%s() LN%d, Memory allocated failed!\n", __func__, __LINE__); - exit(-1); - } - - int len = 0; - int batchNum = 0; - - verbosePrint("%s() LN%d: Creating table from %"PRIu64" to %"PRIu64"\n", - __func__, __LINE__, - pThreadInfo->start_table_from, pThreadInfo->end_table_to); - - for (uint64_t i = pThreadInfo->start_table_from; - i <= pThreadInfo->end_table_to; i++) { - if (0 == g_Dbs.use_metric) { - snprintf(pThreadInfo->buffer, buff_len, - "create table if not exists %s.%s%"PRIu64" %s;", - pThreadInfo->db_name, - g_args.tb_prefix, i, - pThreadInfo->cols); - } else { - if (superTblInfo == NULL) { - errorPrint("%s() LN%d, use metric, but super table info is NULL\n", - __func__, __LINE__); - free(pThreadInfo->buffer); + pThreadInfo->buffer = calloc(buff_len, 1); + if (pThreadInfo->buffer == NULL) { + errorPrint("%s() LN%d, Memory allocated failed!\n", __func__, __LINE__); exit(-1); - } else { - if (0 == len) { - batchNum = 0; - memset(pThreadInfo->buffer, 0, buff_len); - len += snprintf(pThreadInfo->buffer + len, - buff_len - len, "create table "); - } - char* tagsValBuf = NULL; - if (0 == superTblInfo->tagSource) { - tagsValBuf = generateTagVaulesForStb(superTblInfo, i); + } + + int len = 0; + int batchNum = 0; + + verbosePrint("%s() LN%d: Creating table from %"PRIu64" to %"PRIu64"\n", + __func__, __LINE__, + pThreadInfo->start_table_from, pThreadInfo->end_table_to); + + for (uint64_t i = pThreadInfo->start_table_from; + i <= pThreadInfo->end_table_to; i++) { + if (0 == g_Dbs.use_metric) { + snprintf(pThreadInfo->buffer, buff_len, + "create table if not exists %s.%s%"PRIu64" %s;", + pThreadInfo->db_name, + g_args.tb_prefix, i, + pThreadInfo->cols); } else { - tagsValBuf = getTagValueFromTagSample( - superTblInfo, - i % superTblInfo->tagSampleCount); - } - if (NULL == tagsValBuf) { - free(pThreadInfo->buffer); - return NULL; - } - len += snprintf(pThreadInfo->buffer + len, - buff_len - len, - "if not exists %s.%s%"PRIu64" using %s.%s tags %s ", - pThreadInfo->db_name, 
superTblInfo->childTblPrefix, - i, pThreadInfo->db_name, - superTblInfo->sTblName, tagsValBuf); - free(tagsValBuf); - batchNum++; - if ((batchNum < superTblInfo->batchCreateTableNum) - && ((buff_len - len) - >= (superTblInfo->lenOfTagOfOneRow + 256))) { - continue; + if (superTblInfo == NULL) { + errorPrint("%s() LN%d, use metric, but super table info is NULL\n", + __func__, __LINE__); + free(pThreadInfo->buffer); + exit(-1); + } else { + if (0 == len) { + batchNum = 0; + memset(pThreadInfo->buffer, 0, buff_len); + len += snprintf(pThreadInfo->buffer + len, + buff_len - len, "create table "); + } + char* tagsValBuf = NULL; + if (0 == superTblInfo->tagSource) { + tagsValBuf = generateTagVaulesForStb(superTblInfo, i); + } else { + tagsValBuf = getTagValueFromTagSample( + superTblInfo, + i % superTblInfo->tagSampleCount); + } + if (NULL == tagsValBuf) { + free(pThreadInfo->buffer); + return NULL; + } + len += snprintf(pThreadInfo->buffer + len, + buff_len - len, + "if not exists %s.%s%"PRIu64" using %s.%s tags %s ", + pThreadInfo->db_name, superTblInfo->childTblPrefix, + i, pThreadInfo->db_name, + superTblInfo->sTblName, tagsValBuf); + free(tagsValBuf); + batchNum++; + if ((batchNum < superTblInfo->batchCreateTableNum) + && ((buff_len - len) + >= (superTblInfo->lenOfTagOfOneRow + 256))) { + continue; + } + } } - } - } - len = 0; - if (0 != queryDbExec(pThreadInfo->taos, pThreadInfo->buffer, - NO_INSERT_TYPE, false)){ - errorPrint( "queryDbExec() failed. buffer:\n%s\n", pThreadInfo->buffer); - free(pThreadInfo->buffer); - return NULL; - } + len = 0; + if (0 != queryDbExec(pThreadInfo->taos, pThreadInfo->buffer, + NO_INSERT_TYPE, false)){ + errorPrint( "queryDbExec() failed. buffer:\n%s\n", pThreadInfo->buffer); + free(pThreadInfo->buffer); + return NULL; + } - uint64_t currentPrintTime = taosGetTimestampMs(); - if (currentPrintTime - lastPrintTime > 30*1000) { - printf("thread[%d] already create %"PRIu64" - %"PRIu64" tables\n", - pThreadInfo->threadID, pThreadInfo->start_table_from, i); - lastPrintTime = currentPrintTime; + uint64_t currentPrintTime = taosGetTimestampMs(); + if (currentPrintTime - lastPrintTime > 30*1000) { + printf("thread[%d] already create %"PRIu64" - %"PRIu64" tables\n", + pThreadInfo->threadID, pThreadInfo->start_table_from, i); + lastPrintTime = currentPrintTime; + } } - } - if (0 != len) { - if (0 != queryDbExec(pThreadInfo->taos, pThreadInfo->buffer, - NO_INSERT_TYPE, false)) { - errorPrint( "queryDbExec() failed. buffer:\n%s\n", pThreadInfo->buffer); + if (0 != len) { + if (0 != queryDbExec(pThreadInfo->taos, pThreadInfo->buffer, + NO_INSERT_TYPE, false)) { + errorPrint( "queryDbExec() failed. 
buffer:\n%s\n", pThreadInfo->buffer); + } } - } - free(pThreadInfo->buffer); - return NULL; + free(pThreadInfo->buffer); + return NULL; } static int startMultiThreadCreateChildTable( char* cols, int threads, uint64_t tableFrom, int64_t ntables, char* db_name, SSuperTable* superTblInfo) { - pthread_t *pids = calloc(1, threads * sizeof(pthread_t)); - threadInfo *infos = calloc(1, threads * sizeof(threadInfo)); - - if ((NULL == pids) || (NULL == infos)) { - printf("malloc failed\n"); - exit(-1); - } + pthread_t *pids = calloc(1, threads * sizeof(pthread_t)); + threadInfo *infos = calloc(1, threads * sizeof(threadInfo)); - if (threads < 1) { - threads = 1; - } + if ((NULL == pids) || (NULL == infos)) { + printf("malloc failed\n"); + exit(-1); + } - int64_t a = ntables / threads; - if (a < 1) { - threads = ntables; - a = 1; - } + if (threads < 1) { + threads = 1; + } - int64_t b = 0; - b = ntables % threads; - - for (int64_t i = 0; i < threads; i++) { - threadInfo *pThreadInfo = infos + i; - pThreadInfo->threadID = i; - tstrncpy(pThreadInfo->db_name, db_name, TSDB_DB_NAME_LEN); - pThreadInfo->superTblInfo = superTblInfo; - verbosePrint("%s() %d db_name: %s\n", __func__, __LINE__, db_name); - pThreadInfo->taos = taos_connect( - g_Dbs.host, - g_Dbs.user, - g_Dbs.password, - db_name, - g_Dbs.port); - if (pThreadInfo->taos == NULL) { - errorPrint( "%s() LN%d, Failed to connect to TDengine, reason:%s\n", - __func__, __LINE__, taos_errstr(NULL)); - free(pids); - free(infos); - return -1; + int64_t a = ntables / threads; + if (a < 1) { + threads = ntables; + a = 1; } - pThreadInfo->start_table_from = tableFrom; - pThreadInfo->ntables = iend_table_to = i < b ? tableFrom + a : tableFrom + a - 1; - tableFrom = pThreadInfo->end_table_to + 1; - pThreadInfo->use_metric = true; - pThreadInfo->cols = cols; - pThreadInfo->minDelay = UINT64_MAX; - pthread_create(pids + i, NULL, createTable, pThreadInfo); - } + int64_t b = 0; + b = ntables % threads; - for (int i = 0; i < threads; i++) { - pthread_join(pids[i], NULL); - } + for (int64_t i = 0; i < threads; i++) { + threadInfo *pThreadInfo = infos + i; + pThreadInfo->threadID = i; + tstrncpy(pThreadInfo->db_name, db_name, TSDB_DB_NAME_LEN); + pThreadInfo->superTblInfo = superTblInfo; + verbosePrint("%s() %d db_name: %s\n", __func__, __LINE__, db_name); + pThreadInfo->taos = taos_connect( + g_Dbs.host, + g_Dbs.user, + g_Dbs.password, + db_name, + g_Dbs.port); + if (pThreadInfo->taos == NULL) { + errorPrint( "%s() LN%d, Failed to connect to TDengine, reason:%s\n", + __func__, __LINE__, taos_errstr(NULL)); + free(pids); + free(infos); + return -1; + } - for (int i = 0; i < threads; i++) { - threadInfo *pThreadInfo = infos + i; - taos_close(pThreadInfo->taos); - } + pThreadInfo->start_table_from = tableFrom; + pThreadInfo->ntables = iend_table_to = i < b ? 
tableFrom + a : tableFrom + a - 1; + tableFrom = pThreadInfo->end_table_to + 1; + pThreadInfo->use_metric = true; + pThreadInfo->cols = cols; + pThreadInfo->minDelay = UINT64_MAX; + pthread_create(pids + i, NULL, createTable, pThreadInfo); + } - free(pids); - free(infos); + for (int i = 0; i < threads; i++) { + pthread_join(pids[i], NULL); + } - return 0; + for (int i = 0; i < threads; i++) { + threadInfo *pThreadInfo = infos + i; + taos_close(pThreadInfo->taos); + } + + free(pids); + free(infos); + + return 0; } static void createChildTables() { - char tblColsBuf[MAX_SQL_SIZE]; + char tblColsBuf[TSDB_MAX_BYTES_PER_ROW]; int len; for (int i = 0; i < g_Dbs.dbCount; i++) { @@ -3220,21 +3222,21 @@ static void createChildTables() { } } else { // normal table - len = snprintf(tblColsBuf, MAX_SQL_SIZE, "(TS TIMESTAMP"); + len = snprintf(tblColsBuf, TSDB_MAX_BYTES_PER_ROW, "(TS TIMESTAMP"); for (int j = 0; j < g_args.num_of_CPR; j++) { if ((strncasecmp(g_args.datatype[j], "BINARY", strlen("BINARY")) == 0) || (strncasecmp(g_args.datatype[j], "NCHAR", strlen("NCHAR")) == 0)) { - snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, - ", COL%d %s(%d)", j, g_args.datatype[j], g_args.len_of_binary); + snprintf(tblColsBuf + len, TSDB_MAX_BYTES_PER_ROW - len, + ",C%d %s(%d)", j, g_args.datatype[j], g_args.len_of_binary); } else { - snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, - ", COL%d %s", j, g_args.datatype[j]); + snprintf(tblColsBuf + len, TSDB_MAX_BYTES_PER_ROW - len, + ",C%d %s", j, g_args.datatype[j]); } len = strlen(tblColsBuf); } - snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, ")"); + snprintf(tblColsBuf + len, TSDB_MAX_BYTES_PER_ROW - len, ")"); verbosePrint("%s() LN%d: dbName: %s num of tb: %"PRId64" schema: %s\n", __func__, __LINE__, @@ -7986,7 +7988,7 @@ static void initOfQueryMeta() { tstrncpy(g_queryInfo.password, TSDB_DEFAULT_PASS, MAX_PASSWORD_SIZE); } -static void setParaFromArg(){ +static void setParaFromArg() { if (g_args.host) { tstrncpy(g_Dbs.host, g_args.host, MAX_HOSTNAME_SIZE); } else { @@ -8022,10 +8024,10 @@ static void setParaFromArg(){ g_Dbs.do_aggreFunc = true; - char dataString[STRING_LEN]; + char dataString[TSDB_MAX_BYTES_PER_ROW]; char **data_type = g_args.datatype; - memset(dataString, 0, STRING_LEN); + memset(dataString, 0, TSDB_MAX_BYTES_PER_ROW); if (strcasecmp(data_type[0], "BINARY") == 0 || strcasecmp(data_type[0], "BOOL") == 0 @@ -8143,7 +8145,7 @@ static void querySqlFile(TAOS* taos, char* sqlFile) } int read_len = 0; - char * cmd = calloc(1, MAX_SQL_SIZE); + char * cmd = calloc(1, TSDB_MAX_BYTES_PER_ROW); size_t cmd_len = 0; char * line = NULL; size_t line_len = 0; @@ -8151,7 +8153,7 @@ static void querySqlFile(TAOS* taos, char* sqlFile) double t = taosGetTimestampMs(); while((read_len = tgetline(&line, &line_len, fp)) != -1) { - if (read_len >= MAX_SQL_SIZE) continue; + if (read_len >= TSDB_MAX_BYTES_PER_ROW) continue; line[--read_len] = '\0'; if (read_len == 0 || isCommentLine(line)) { // line starts with # @@ -8174,7 +8176,7 @@ static void querySqlFile(TAOS* taos, char* sqlFile) tmfclose(fp); return; } - memset(cmd, 0, MAX_SQL_SIZE); + memset(cmd, 0, TSDB_MAX_BYTES_PER_ROW); cmd_len = 0; } diff --git a/tests/pytest/query/query1970YearsAf.py b/tests/pytest/query/query1970YearsAf.py index 93404afd5967e772b383b8e2439ec4e89f5a7029..a365369b21fc7429216f5c1e8c624bf856a744c1 100644 --- a/tests/pytest/query/query1970YearsAf.py +++ b/tests/pytest/query/query1970YearsAf.py @@ -187,19 +187,19 @@ class TDTestCase: "select * from t9 where t9.ts > '1969-12-31 22:00:00.000' and 
t9.ts <'1970-01-01 02:00:00.000' " ) tdSql.checkRows(719) - + tdSql.query( "select * from t0,t1 where t0.ts=t1.ts and t1.ts >= '1970-01-01 00:00:00.000' " ) tdSql.checkRows(680) - + tdSql.query( - "select diff(col1) from t0 where t0.ts >= '1970-01-01 00:00:00.000' " + "select diff(c1) from t0 where t0.ts >= '1970-01-01 00:00:00.000' " ) tdSql.checkRows(679) tdSql.query( - "select t0,col1 from stb2 where stb2.ts < '1970-01-01 00:00:00.000' order by ts" + "select t0,c1 from stb2 where stb2.ts < '1970-01-01 00:00:00.000' order by ts" ) tdSql.checkRows(43200) diff --git a/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.py b/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.py index 081057f1802bd18d8aab7e7639589e8759ed44ed..4edef88cf182eee88e42615fb007bbe4756f0c7c 100644 --- a/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.py +++ b/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.py @@ -23,7 +23,7 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) - + def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -39,7 +39,7 @@ class TDTestCase: buildPath = root[:len(root)-len("/build/bin")] break return buildPath - + def run(self): buildPath = self.getBuildPath() if (buildPath == ""): @@ -48,7 +48,7 @@ class TDTestCase: tdLog.info("taosd found in %s" % buildPath) binPath = buildPath+ "/build/bin/" - # insert: create one or mutiple tables per sql and insert multiple rows per sql + # insert: create one or mutiple tables per sql and insert multiple rows per sql # test case for https://jira.taosdata.com:18080/browse/TD-4985 os.system("%staosdemo -f tools/taosdemoAllTest/TD-4985/query-limit-offset.json -y " % binPath) tdSql.execute("use db") @@ -56,27 +56,27 @@ class TDTestCase: tdSql.checkData(0, 0, 10000) for i in range(1000): - tdSql.execute('''insert into stb00_9999 values(%d, %d, %d,'test99.%s')''' + tdSql.execute('''insert into stb00_9999 values(%d, %d, %d,'test99.%s')''' % (1600000000000 + i, i, -10000+i, i)) - tdSql.execute('''insert into stb00_8888 values(%d, %d, %d,'test98.%s')''' + tdSql.execute('''insert into stb00_8888 values(%d, %d, %d,'test98.%s')''' % (1600000000000 + i, i, -10000+i, i)) - tdSql.execute('''insert into stb00_7777 values(%d, %d, %d,'test97.%s')''' + tdSql.execute('''insert into stb00_7777 values(%d, %d, %d,'test97.%s')''' % (1600000000000 + i, i, -10000+i, i)) - tdSql.execute('''insert into stb00_6666 values(%d, %d, %d,'test96.%s')''' + tdSql.execute('''insert into stb00_6666 values(%d, %d, %d,'test96.%s')''' % (1600000000000 + i, i, -10000+i, i)) - tdSql.execute('''insert into stb00_5555 values(%d, %d, %d,'test95.%s')''' + tdSql.execute('''insert into stb00_5555 values(%d, %d, %d,'test95.%s')''' % (1600000000000 + i, i, -10000+i, i)) - tdSql.execute('''insert into stb00_4444 values(%d, %d, %d,'test94.%s')''' + tdSql.execute('''insert into stb00_4444 values(%d, %d, %d,'test94.%s')''' % (1600000000000 + i, i, -10000+i, i)) - tdSql.execute('''insert into stb00_3333 values(%d, %d, %d,'test93.%s')''' + tdSql.execute('''insert into stb00_3333 values(%d, %d, %d,'test93.%s')''' % (1600000000000 + i, i, -10000+i, i)) - tdSql.execute('''insert into stb00_2222 values(%d, %d, %d,'test92.%s')''' + tdSql.execute('''insert into stb00_2222 values(%d, %d, %d,'test92.%s')''' % (1600000000000 + i, i, -10000+i, i)) - tdSql.execute('''insert into stb00_1111 values(%d, %d, %d,'test91.%s')''' + tdSql.execute('''insert into stb00_1111 
values(%d, %d, %d,'test91.%s')''' % (1600000000000 + i, i, -10000+i, i)) - tdSql.execute('''insert into stb00_100 values(%d, %d, %d,'test90.%s')''' + tdSql.execute('''insert into stb00_100 values(%d, %d, %d,'test90.%s')''' % (1600000000000 + i, i, -10000+i, i)) - tdSql.query("select * from stb0 where col2 like 'test99%' ") + tdSql.query("select * from stb0 where c2 like 'test99%' ") tdSql.checkRows(1000) tdSql.query("select * from stb0 where tbname like 'stb00_9999' limit 10" ) tdSql.checkData(0, 1, 0) @@ -86,7 +86,7 @@ class TDTestCase: tdSql.checkData(0, 1, 5) tdSql.checkData(1, 1, 6) tdSql.checkData(2, 1, 7) - tdSql.query("select * from stb0 where col2 like 'test98%' ") + tdSql.query("select * from stb0 where c2 like 'test98%' ") tdSql.checkRows(1000) tdSql.query("select * from stb0 where tbname like 'stb00_8888' limit 10" ) tdSql.checkData(0, 1, 0) @@ -96,7 +96,7 @@ class TDTestCase: tdSql.checkData(0, 1, 5) tdSql.checkData(1, 1, 6) tdSql.checkData(2, 1, 7) - tdSql.query("select * from stb0 where col2 like 'test97%' ") + tdSql.query("select * from stb0 where c2 like 'test97%' ") tdSql.checkRows(1000) tdSql.query("select * from stb0 where tbname like 'stb00_7777' limit 10" ) tdSql.checkData(0, 1, 0) @@ -106,7 +106,7 @@ class TDTestCase: tdSql.checkData(0, 1, 5) tdSql.checkData(1, 1, 6) tdSql.checkData(2, 1, 7) - tdSql.query("select * from stb0 where col2 like 'test96%' ") + tdSql.query("select * from stb0 where c2 like 'test96%' ") tdSql.checkRows(1000) tdSql.query("select * from stb0 where tbname like 'stb00_6666' limit 10" ) tdSql.checkData(0, 1, 0) @@ -116,7 +116,7 @@ class TDTestCase: tdSql.checkData(0, 1, 5) tdSql.checkData(1, 1, 6) tdSql.checkData(2, 1, 7) - tdSql.query("select * from stb0 where col2 like 'test95%' ") + tdSql.query("select * from stb0 where c2 like 'test95%' ") tdSql.checkRows(1000) tdSql.query("select * from stb0 where tbname like 'stb00_5555' limit 10" ) tdSql.checkData(0, 1, 0) @@ -126,7 +126,7 @@ class TDTestCase: tdSql.checkData(0, 1, 5) tdSql.checkData(1, 1, 6) tdSql.checkData(2, 1, 7) - tdSql.query("select * from stb0 where col2 like 'test94%' ") + tdSql.query("select * from stb0 where c2 like 'test94%' ") tdSql.checkRows(1000) tdSql.query("select * from stb0 where tbname like 'stb00_4444' limit 10" ) tdSql.checkData(0, 1, 0) @@ -136,7 +136,7 @@ class TDTestCase: tdSql.checkData(0, 1, 5) tdSql.checkData(1, 1, 6) tdSql.checkData(2, 1, 7) - tdSql.query("select * from stb0 where col2 like 'test93%' ") + tdSql.query("select * from stb0 where c2 like 'test93%' ") tdSql.checkRows(1000) tdSql.query("select * from stb0 where tbname like 'stb00_3333' limit 100" ) tdSql.checkData(0, 1, 0) @@ -146,7 +146,7 @@ class TDTestCase: tdSql.checkData(0, 1, 5) tdSql.checkData(1, 1, 6) tdSql.checkData(2, 1, 7) - tdSql.query("select * from stb0 where col2 like 'test92%' ") + tdSql.query("select * from stb0 where c2 like 'test92%' ") tdSql.checkRows(1000) tdSql.query("select * from stb0 where tbname like 'stb00_2222' limit 100" ) tdSql.checkData(0, 1, 0) @@ -156,7 +156,7 @@ class TDTestCase: tdSql.checkData(0, 1, 5) tdSql.checkData(1, 1, 6) tdSql.checkData(2, 1, 7) - tdSql.query("select * from stb0 where col2 like 'test91%' ") + tdSql.query("select * from stb0 where c2 like 'test91%' ") tdSql.checkRows(1000) tdSql.query("select * from stb0 where tbname like 'stb00_1111' limit 100" ) tdSql.checkData(0, 1, 0) @@ -166,7 +166,7 @@ class TDTestCase: tdSql.checkData(0, 1, 5) tdSql.checkData(1, 1, 6) tdSql.checkData(2, 1, 7) - tdSql.query("select * from stb0 where col2 like 'test90%' ") 
+ tdSql.query("select * from stb0 where c2 like 'test90%' ") tdSql.checkRows(1000) tdSql.query("select * from stb0 where tbname like 'stb00_100' limit 100" ) tdSql.checkData(0, 1, 0) @@ -178,10 +178,10 @@ class TDTestCase: tdSql.checkData(2, 1, 7) - os.system("rm -rf tools/taosdemoAllTest/TD-4985/query-limit-offset.py.sql") - - - + os.system("rm -rf tools/taosdemoAllTest/TD-4985/query-limit-offset.py.sql") + + + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py index b710e34d8c06c85fd9a1ba06763939e7c4fdcdfd..78bd0c7e6053c7caff5a7b44b425474e78a0733d 100755 --- a/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py +++ b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py @@ -23,7 +23,7 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) - + def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -39,7 +39,7 @@ class TDTestCase: buildPath = root[:len(root)-len("/build/bin")] break return buildPath - + def run(self): buildPath = self.getBuildPath() if (buildPath == ""): @@ -48,7 +48,7 @@ class TDTestCase: tdLog.info("taosd found in %s" % buildPath) binPath = buildPath+ "/build/bin/" - # insert: create one or mutiple tables per sql and insert multiple rows per sql + # insert: create one or mutiple tables per sql and insert multiple rows per sql # test case for https://jira.taosdata.com:18080/browse/TD-5213 os.system("%staosdemo -f tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json -y " % binPath) tdSql.execute("use db") @@ -58,29 +58,29 @@ class TDTestCase: # tdSql.query("select * from stb_old") # tdSql.checkRows(10) # tdSql.checkCols(1024) - + # tdSql.query("select count (tbname) from stb_new") # tdSql.checkData(0, 0, 10) # tdSql.query("select * from stb_new") # tdSql.checkRows(10) # tdSql.checkCols(4096) - + # tdLog.info("stop dnode to commit data to disk") # tdDnodes.stop(1) - # tdDnodes.start(1) + # tdDnodes.start(1) #regular table sql = "create table tb(ts timestamp, " for i in range(1022): - sql += "col%d binary(14), " % (i + 1) - sql += "col1023 binary(22))" + sql += "c%d binary(14), " % (i + 1) + sql += "c1023 binary(22))" tdSql.execute(sql) for i in range(4): sql = "insert into tb values(%d, " for j in range(1022): - str = "'%s', " % self.get_random_string(14) + str = "'%s', " % self.get_random_string(14) sql += str sql += "'%s')" % self.get_random_string(22) tdSql.execute(sql % (self.ts + i)) @@ -94,19 +94,19 @@ class TDTestCase: time.sleep(1) tdSql.query("select count(*) from tb") - tdSql.checkData(0, 0, 4) + tdSql.checkData(0, 0, 4) sql = "create table tb1(ts timestamp, " for i in range(4094): - sql += "col%d binary(14), " % (i + 1) - sql += "col4095 binary(22))" + sql += "c%d binary(14), " % (i + 1) + sql += "c4095 binary(22))" tdSql.execute(sql) for i in range(4): sql = "insert into tb1 values(%d, " for j in range(4094): - str = "'%s', " % self.get_random_string(14) + str = "'%s', " % self.get_random_string(14) sql += str sql += "'%s')" % self.get_random_string(22) tdSql.execute(sql % (self.ts + i)) @@ -120,14 +120,14 @@ class TDTestCase: time.sleep(1) tdSql.query("select count(*) from tb1") - tdSql.checkData(0, 0, 4) - + tdSql.checkData(0, 0, 4) + + + + #os.system("rm -rf tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py.sql") + - 
#os.system("rm -rf tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py.sql") - - - def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py index 01e46eaaa00326c0da2aa2f61bb14a7349f3ca7f..d9d6cc1082e9eac9ef3a900152bcbb8b77942c61 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py @@ -23,7 +23,7 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) - + def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -39,7 +39,7 @@ class TDTestCase: buildPath = root[:len(root)-len("/build/bin")] break return buildPath - + def run(self): buildPath = self.getBuildPath() if (buildPath == ""): @@ -48,7 +48,7 @@ class TDTestCase: tdLog.info("taosd found in %s" % buildPath) binPath = buildPath+ "/build/bin/" - # insert: create one or mutiple tables per sql and insert multiple rows per sql + # insert: create one or mutiple tables per sql and insert multiple rows per sql os.system("%staosdemo -f tools/taosdemoAllTest/insert-1s1tnt1r.json -y " % binPath) tdSql.execute("use db") tdSql.query("select count (tbname) from stb0") @@ -62,7 +62,7 @@ class TDTestCase: tdSql.query("select count(*) from stb01_1") tdSql.checkData(0, 0, 200) tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 200000) + tdSql.checkData(0, 0, 200000) # restful connector insert data os.system("%staosdemo -f tools/taosdemoAllTest/insertRestful.json -y " % binPath) @@ -81,7 +81,7 @@ class TDTestCase: tdSql.checkData(0, 0, 200) - # insert: create mutiple tables per sql and insert one rows per sql . + # insert: create mutiple tables per sql and insert one rows per sql . os.system("%staosdemo -f tools/taosdemoAllTest/insert-1s1tntmr.json -y " % binPath) tdSql.execute("use db") tdSql.query("select count (tbname) from stb0") @@ -89,34 +89,34 @@ class TDTestCase: tdSql.query("select count (tbname) from stb1") tdSql.checkData(0, 0, 20) tdSql.query("select count(*) from stb00_0") - tdSql.checkData(0, 0, 10000) + tdSql.checkData(0, 0, 10000) tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 100000) + tdSql.checkData(0, 0, 100000) tdSql.query("select count(*) from stb01_0") - tdSql.checkData(0, 0, 20000) + tdSql.checkData(0, 0, 20000) tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 400000) + tdSql.checkData(0, 0, 400000) - # insert: using parament "insert_interval to controls spped of insert. + # insert: using parament "insert_interval to controls spped of insert. 
# but We need to have accurate methods to control the speed, such as getting the speed value, checking the count and so on。 os.system("%staosdemo -f tools/taosdemoAllTest/insert-interval-speed.json -y" % binPath) tdSql.execute("use db") tdSql.query("show stables") tdSql.checkData(0, 4, 100) tdSql.query("select count(*) from stb00_0") - tdSql.checkData(0, 0, 20000) + tdSql.checkData(0, 0, 20000) tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 2000000) + tdSql.checkData(0, 0, 2000000) tdSql.query("show stables") tdSql.checkData(1, 4, 100) tdSql.query("select count(*) from stb01_0") - tdSql.checkData(0, 0, 20000) + tdSql.checkData(0, 0, 20000) tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 2000000) - + tdSql.checkData(0, 0, 2000000) + # spend 2min30s for 3 testcases. # insert: drop and child_table_exists combination test - # insert: using parament "childtable_offset and childtable_limit" to control table'offset point and offset + # insert: using parament "childtable_offset and childtable_limit" to control table'offset point and offset os.system("%staosdemo -f tools/taosdemoAllTest/insert-nodbnodrop.json -y" % binPath) tdSql.error("show dbno.stables") os.system("%staosdemo -f tools/taosdemoAllTest/insert-newdb.json -y" % binPath) @@ -128,41 +128,41 @@ class TDTestCase: tdSql.query("select count (tbname) from stb2") tdSql.checkData(0, 0, 7) tdSql.query("select count (tbname) from stb3") - tdSql.checkData(0, 0, 8) + tdSql.checkData(0, 0, 8) tdSql.query("select count (tbname) from stb4") - tdSql.checkData(0, 0, 8) + tdSql.checkData(0, 0, 8) os.system("%staosdemo -f tools/taosdemoAllTest/insert-offset.json -y" % binPath) - tdSql.execute("use db") + tdSql.execute("use db") tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 50) + tdSql.checkData(0, 0, 50) tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 240) + tdSql.checkData(0, 0, 240) tdSql.query("select count(*) from stb2") - tdSql.checkData(0, 0, 220) + tdSql.checkData(0, 0, 220) tdSql.query("select count(*) from stb3") tdSql.checkData(0, 0, 180) tdSql.query("select count(*) from stb4") tdSql.checkData(0, 0, 160) os.system("%staosdemo -f tools/taosdemoAllTest/insert-newtable.json -y" % binPath) - tdSql.execute("use db") + tdSql.execute("use db") tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 150) + tdSql.checkData(0, 0, 150) tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 360) + tdSql.checkData(0, 0, 360) tdSql.query("select count(*) from stb2") - tdSql.checkData(0, 0, 360) + tdSql.checkData(0, 0, 360) tdSql.query("select count(*) from stb3") tdSql.checkData(0, 0, 340) tdSql.query("select count(*) from stb4") tdSql.checkData(0, 0, 400) os.system("%staosdemo -f tools/taosdemoAllTest/insert-renewdb.json -y" % binPath) - tdSql.execute("use db") + tdSql.execute("use db") tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 50) + tdSql.checkData(0, 0, 50) tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 120) + tdSql.checkData(0, 0, 120) tdSql.query("select count(*) from stb2") - tdSql.checkData(0, 0, 140) + tdSql.checkData(0, 0, 140) tdSql.query("select count(*) from stb3") tdSql.checkData(0, 0, 160) tdSql.query("select count(*) from stb4") @@ -170,59 +170,59 @@ class TDTestCase: # insert: let parament in json file is illegal, it'll expect error. 
- tdSql.execute("drop database if exists db") + tdSql.execute("drop database if exists db") os.system("%staosdemo -f tools/taosdemoAllTest/insertColumnsAndTagNumLarge1024.json -y " % binPath) tdSql.error("use db") - tdSql.execute("drop database if exists db") + tdSql.execute("drop database if exists db") os.system("%staosdemo -f tools/taosdemoAllTest/insertSigcolumnsNum1024.json -y " % binPath) tdSql.error("select * from db.stb0") - tdSql.execute("drop database if exists db") + tdSql.execute("drop database if exists db") os.system("%staosdemo -f tools/taosdemoAllTest/insertColumnsAndTagNum1024.json -y " % binPath) tdSql.query("select count(*) from db.stb0") - tdSql.checkData(0, 0, 10000) - tdSql.execute("drop database if exists db") + tdSql.checkData(0, 0, 10000) + tdSql.execute("drop database if exists db") os.system("%staosdemo -f tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json -y " % binPath) tdSql.query("select count(*) from db.stb0") tdSql.checkRows(0) - tdSql.execute("drop database if exists db") + tdSql.execute("drop database if exists db") os.system("%staosdemo -f tools/taosdemoAllTest/insertColumnsNum0.json -y " % binPath) - tdSql.execute("use db") + tdSql.execute("use db") tdSql.query("show stables like 'stb0%' ") tdSql.checkData(0, 2, 11) - tdSql.execute("drop database if exists db") - os.system("%staosdemo -f tools/taosdemoAllTest/insertTagsNumLarge128.json -y " % binPath) - tdSql.error("use db1") - tdSql.execute("drop database if exists db") - os.system("%staosdemo -f tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar16384.json -y " % binPath) - tdSql.query("select count(*) from db.stb0") + tdSql.execute("drop database if exists db") + os.system("%staosdemo -f tools/taosdemoAllTest/insertTagsNumLarge128.json -y " % binPath) + tdSql.error("use db1") + tdSql.execute("drop database if exists db") + os.system("%staosdemo -f tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar16384.json -y " % binPath) + tdSql.query("select count(*) from db.stb0") tdSql.checkRows(1) - tdSql.query("select count(*) from db.stb1") + tdSql.query("select count(*) from db.stb1") tdSql.checkRows(1) tdSql.error("select * from db.stb3") tdSql.error("select * from db.stb2") - tdSql.execute("drop database if exists db") - os.system("%staosdemo -f tools/taosdemoAllTest/insertNumOfrecordPerReq0.json -y " % binPath) - tdSql.error("select count(*) from db.stb0") - tdSql.execute("drop database if exists db") - os.system("%staosdemo -f tools/taosdemoAllTest/insertNumOfrecordPerReqless0.json -y " % binPath) - tdSql.error("use db") - tdSql.execute("drop database if exists db") - os.system("%staosdemo -f tools/taosdemoAllTest/insertChildTab0.json -y " % binPath) - tdSql.error("use db") - tdSql.execute("drop database if exists db") - os.system("%staosdemo -f tools/taosdemoAllTest/insertChildTabLess0.json -y " % binPath) - tdSql.error("use db") - tdSql.execute("drop database if exists blf") - os.system("%staosdemo -f tools/taosdemoAllTest/insertTimestepMulRowsLargeint16.json -y " % binPath) - tdSql.execute("use blf") - tdSql.query("select ts from blf.p_0_topics_7 limit 262800,1") + tdSql.execute("drop database if exists db") + os.system("%staosdemo -f tools/taosdemoAllTest/insertNumOfrecordPerReq0.json -y " % binPath) + tdSql.error("select count(*) from db.stb0") + tdSql.execute("drop database if exists db") + os.system("%staosdemo -f tools/taosdemoAllTest/insertNumOfrecordPerReqless0.json -y " % binPath) + tdSql.error("use db") + tdSql.execute("drop database if exists db") + os.system("%staosdemo -f 
tools/taosdemoAllTest/insertChildTab0.json -y " % binPath) + tdSql.error("use db") + tdSql.execute("drop database if exists db") + os.system("%staosdemo -f tools/taosdemoAllTest/insertChildTabLess0.json -y " % binPath) + tdSql.error("use db") + tdSql.execute("drop database if exists blf") + os.system("%staosdemo -f tools/taosdemoAllTest/insertTimestepMulRowsLargeint16.json -y " % binPath) + tdSql.execute("use blf") + tdSql.query("select ts from blf.p_0_topics_7 limit 262800,1") tdSql.checkData(0, 0, "2020-03-31 12:00:00.000") tdSql.query("select first(ts) from blf.p_0_topics_2") tdSql.checkData(0, 0, "2019-10-01 00:00:00") - tdSql.query("select last(ts) from blf.p_0_topics_6 ") + tdSql.query("select last(ts) from blf.p_0_topics_6 ") tdSql.checkData(0, 0, "2020-09-29 23:59:00") - os.system("%staosdemo -f tools/taosdemoAllTest/insertMaxNumPerReq.json -y " % binPath) - tdSql.execute("use db") + os.system("%staosdemo -f tools/taosdemoAllTest/insertMaxNumPerReq.json -y " % binPath) + tdSql.execute("use db") tdSql.query("select count(*) from stb0") tdSql.checkData(0, 0, 5000000) tdSql.query("select count(*) from stb1") @@ -230,7 +230,7 @@ class TDTestCase: - # insert: timestamp and step + # insert: timestamp and step os.system("%staosdemo -f tools/taosdemoAllTest/insert-timestep.json -y " % binPath) tdSql.execute("use db") tdSql.query("show stables") @@ -239,13 +239,13 @@ class TDTestCase: tdSql.query("select count (tbname) from stb1") tdSql.checkData(0, 0, 20) tdSql.query("select last(ts) from db.stb00_0") - tdSql.checkData(0, 0, "2020-10-01 00:00:00.019000") + tdSql.checkData(0, 0, "2020-10-01 00:00:00.019000") tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 200) + tdSql.checkData(0, 0, 200) tdSql.query("select last(ts) from db.stb01_0") - tdSql.checkData(0, 0, "2020-11-01 00:00:00.190000") + tdSql.checkData(0, 0, "2020-11-01 00:00:00.190000") tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 400) + tdSql.checkData(0, 0, 400) # # insert: disorder_ratio os.system("%staosdemo -f tools/taosdemoAllTest/insert-disorder.json -g 2>&1 -y " % binPath) @@ -255,14 +255,14 @@ class TDTestCase: tdSql.query("select count (tbname) from stb1") tdSql.checkData(0, 0, 1) tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 10) + tdSql.checkData(0, 0, 10) tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 10) + tdSql.checkData(0, 0, 10) # insert: sample json os.system("%staosdemo -f tools/taosdemoAllTest/insert-sample.json -y " % binPath) tdSql.execute("use dbtest123") - tdSql.query("select col2 from stb0") + tdSql.query("select c2 from stb0") tdSql.checkData(0, 0, 2147483647) tdSql.query("select * from stb1 where t1=-127") tdSql.checkRows(20) @@ -271,13 +271,13 @@ class TDTestCase: tdSql.query("select * from stb1 where t2=126") tdSql.checkRows(10) - # insert: test interlace parament + # insert: test interlace parament os.system("%staosdemo -f tools/taosdemoAllTest/insert-interlace-row.json -y " % binPath) tdSql.execute("use db") tdSql.query("select count (tbname) from stb0") tdSql.checkData(0, 0, 100) tdSql.query("select count (*) from stb0") - tdSql.checkData(0, 0, 15000) + tdSql.checkData(0, 0, 15000) # # insert: auto_create @@ -317,10 +317,10 @@ class TDTestCase: tdSql.checkRows(20) os.system("rm -rf ./insert_res.txt") - os.system("rm -rf tools/taosdemoAllTest/taosdemoTestInsertWithJson.py.sql") - - - + os.system("rm -rf tools/taosdemoAllTest/taosdemoTestInsertWithJson.py.sql") + + + def stop(self): tdSql.close() tdLog.success("%s successfully 
executed" % __file__) diff --git a/tests/pytest/tools/taosdemoTest.py b/tests/pytest/tools/taosdemoTest.py index 4cae8dfd3cabcee3c52a2d1eefea41496994745d..5662881031a01d19398cce223892eebbd8133c97 100644 --- a/tests/pytest/tools/taosdemoTest.py +++ b/tests/pytest/tools/taosdemoTest.py @@ -59,11 +59,11 @@ class TDTestCase: tdSql.checkData(0, 0, self.numberOfTables * self.numberOfRecords) tdSql.query( - "select sum(col1) from test.meters interval(1h) sliding(30m)") + "select sum(c1) from test.meters interval(1h) sliding(30m)") tdSql.checkRows(2) tdSql.query( - "select apercentile(col1, 1) from test.meters interval(100s)") + "select apercentile(c1, 1) from test.meters interval(100s)") tdSql.checkRows(1) tdSql.error("select loc, count(loc) from test.meters") diff --git a/tests/pytest/wal/sdbComp.py b/tests/pytest/wal/sdbComp.py index 56b18c49eb002791cbfbf1956e448e36694c1316..428fbc9a145c0c3bae4507e33242ff3670c85024 100644 --- a/tests/pytest/wal/sdbComp.py +++ b/tests/pytest/wal/sdbComp.py @@ -26,11 +26,11 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) - + def getBuildPath(self): global selfPath selfPath = os.path.dirname(os.path.realpath(__file__)) - + if ("community" in selfPath): projPath = selfPath[:selfPath.find("community")] else: @@ -43,7 +43,7 @@ class TDTestCase: buildPath = root[:len(root)-len("/build/bin")] break return buildPath - + def run(self): # set path para @@ -62,9 +62,9 @@ class TDTestCase: os.system("rm -rf %s/sim/dnode1/data/mnode_bak/" % testPath) tdSql.execute("drop database if exists db2") os.system("%staosdemo -f wal/insertDataDb1.json -y " % binPath) - tdSql.execute("drop database if exists db1") + tdSql.execute("drop database if exists db1") os.system("%staosdemo -f wal/insertDataDb2.json -y " % binPath) - tdSql.execute("drop table if exists db2.stb0") + tdSql.execute("drop table if exists db2.stb0") os.system("%staosdemo -f wal/insertDataDb2Newstab.json -y " % binPath) query_pid1 = int(subprocess.getstatusoutput('ps aux|grep taosd |grep -v "grep"|awk \'{print $2}\'')[1]) print(query_pid1) @@ -72,14 +72,14 @@ class TDTestCase: tdSql.execute("drop table if exists stb1_0") tdSql.execute("drop table if exists stb1_1") tdSql.execute("insert into stb0_0 values(1614218412000,8637,78.861045,'R','bf3')(1614218422000,8637,98.861045,'R','bf3')") - tdSql.execute("alter table db2.stb0 add column col4 int") - tdSql.execute("alter table db2.stb0 drop column col2") - tdSql.execute("alter table db2.stb0 add tag t3 int;") + tdSql.execute("alter table db2.stb0 add column c4 int") + tdSql.execute("alter table db2.stb0 drop column c2") + tdSql.execute("alter table db2.stb0 add tag t3 int;") tdSql.execute("alter table db2.stb0 drop tag t1") - tdSql.execute("create table if not exists stb2_0 (ts timestamp, col0 int, col1 float) ") + tdSql.execute("create table if not exists stb2_0 (ts timestamp, c0 int, c1 float) ") tdSql.execute("insert into stb2_0 values(1614218412000,8637,78.861045)") - tdSql.execute("alter table stb2_0 add column col2 binary(4)") - tdSql.execute("alter table stb2_0 drop column col1") + tdSql.execute("alter table stb2_0 add column c2 binary(4)") + tdSql.execute("alter table stb2_0 drop column c1") tdSql.execute("insert into stb2_0 values(1614218422000,8638,'R')") # stop taosd and compact wal file @@ -87,9 +87,9 @@ class TDTestCase: sleep(10) os.system("nohup %s/taosd --compact-mnode-wal -c %s/sim/dnode1/cfg/ & " %(binPath,testPath) ) sleep(5) - assert os.path.exists(walFilePath) , "%s is 
not generated, compact didn't take effect " % walFilePath + assert os.path.exists(walFilePath) , "%s is not generated, compact didn't take effect " % walFilePath - # use new wal file to start taosd + # use new wal file to start taosd tdDnodes.start(1) sleep(5) tdSql.execute("reset query cache") @@ -108,14 +108,14 @@ class TDTestCase: tdSql.checkData(0, 0, 2) tdSql.query("select count(*) from stb2_0") tdSql.checkData(0, 0, 2) - + # delete useless file testcaseFilename = os.path.split(__file__)[-1] os.system("rm -rf ./insert_res.txt") - os.system("rm -rf wal/%s.sql" % testcaseFilename ) - - - + os.system("rm -rf wal/%s.sql" % testcaseFilename) + + + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__)
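
Note on the taosdemo.c buffer-sizing change above: the patch drops the fixed STRING_LEN/MAX_SQL_SIZE buffers and instead derives COL_BUFFER_LEN and BUFFER_SIZE from TDengine's own limits, while the generated column list keeps the bounded snprintf accumulation pattern (now emitting C%d column names instead of col%d). The standalone sketch below only illustrates that pattern and is not part of the patch; the TSDB_* values here are placeholders standing in for the real constants defined in TDengine's headers.

#include <stdio.h>

/* Placeholder values; the real constants come from TDengine's headers. */
#define TSDB_DB_NAME_LEN        33
#define TSDB_TABLE_NAME_LEN     193
#define TSDB_MAX_BYTES_PER_ROW  16384
#define TSDB_MAX_TAGS_LEN       16384

/* Same derivation as the patch: the column list is bounded by the maximum
 * row size, and the command buffer is sized for the names, one row of
 * column definitions and the tag section. */
#define COL_BUFFER_LEN  (TSDB_MAX_BYTES_PER_ROW - 50)
#define BUFFER_SIZE     (50 + TSDB_DB_NAME_LEN + TSDB_TABLE_NAME_LEN + \
                         TSDB_MAX_BYTES_PER_ROW + TSDB_MAX_TAGS_LEN)

int main(void) {
    char cols[COL_BUFFER_LEN] = "\0";
    int  len = 0;

    /* Accumulate ", C<i> <type>" fragments; each snprintf() is limited to
     * the space still left in cols, as createSuperTable() does after the patch. */
    for (int colIndex = 0; colIndex < 3; colIndex++) {
        len += snprintf(cols + len, COL_BUFFER_LEN - len, ", C%d %s", colIndex, "INT");
    }

    char command[BUFFER_SIZE];
    snprintf(command, BUFFER_SIZE,
             "create table if not exists db.meters (ts timestamp%s)", cols);
    printf("%s\n", command);
    return 0;
}

The renamed C%d/c%d columns are what the updated Python tests query (for example "select c2 from stb0" and "select diff(c1) from t0"), which is why the test files change together with the generator.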