Unverified commit cdb8aeba authored by Yang Zhao, committed by GitHub

[TD-10719]remove magic number (#8350)

Parent ec3c5d5b
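The pattern applied throughout this commit: a repeated numeric literal becomes a single named macro, so the intent is documented and call sites cannot drift apart. A minimal sketch of the idea, using the commit's own SQL_BUFF_LEN; the helper function around it is illustrative, not from the commit:

    #include <stdio.h>

    #define SQL_BUFF_LEN 1024   /* was a bare 1024 at every call site */

    /* hypothetical helper: the array size and the snprintf bound now
     * agree by construction, because both come from the same macro */
    static void show_vgroups(const char *db) {
        char buffer[SQL_BUFF_LEN];
        snprintf(buffer, SQL_BUFF_LEN, "show %s.vgroups;", db);
        puts(buffer);
    }

    int main(void) { show_vgroups("test"); return 0; }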
@@ -56,6 +56,7 @@
 #define REQ_EXTRA_BUF_LEN 1024
 #define RESP_BUF_LEN 4096
+#define SQL_BUFF_LEN 1024
 extern char configDir[];
@@ -66,6 +67,7 @@ extern char configDir[];
 #define HEAD_BUFF_LEN TSDB_MAX_COLUMNS*24 // 16*MAX_COLUMNS + (192+32)*2 + insert into ..
 #define BUFFER_SIZE TSDB_MAX_ALLOWED_SQL_LEN
+#define FETCH_BUFFER_SIZE 100 * TSDB_MAX_ALLOWED_SQL_LEN
 #define COND_BUF_LEN (BUFFER_SIZE - 30)
 #define COL_BUFFER_LEN ((TSDB_COL_NAME_LEN + 15) * TSDB_MAX_COLUMNS)
@@ -87,6 +89,7 @@ extern char configDir[];
 #define FLOAT_BUFF_LEN 22
 #define DOUBLE_BUFF_LEN 42
 #define TIMESTAMP_BUFF_LEN 21
+#define PRINT_STAT_INTERVAL 30*1000
 #define MAX_SAMPLES 10000
 #define MAX_NUM_COLUMNS (TSDB_MAX_COLUMNS - 1) // exclude first column timestamp
@@ -97,8 +100,10 @@ extern char configDir[];
 #define MAX_QUERY_SQL_COUNT 100
 #define MAX_DATABASE_COUNT 256
-#define INPUT_BUF_LEN 256
+#define MAX_JSON_BUFF 6400000
+#define INPUT_BUF_LEN 256
+#define EXTRA_SQL_LEN 256
 #define TBNAME_PREFIX_LEN (TSDB_TABLE_NAME_LEN - 20) // 20 characters reserved for seq
 #define SMALL_BUFF_LEN 8
 #define DATATYPE_BUFF_LEN (SMALL_BUFF_LEN*3)
@@ -109,6 +114,45 @@ extern char configDir[];
 #define DEFAULT_INTERLACE_ROWS 0
 #define DEFAULT_DATATYPE_NUM 1
 #define DEFAULT_CHILDTABLES 10000
+#define DEFAULT_TEST_MODE 0
+#define DEFAULT_METAFILE NULL
+#define DEFAULT_SQLFILE NULL
+#define DEFAULT_HOST "localhost"
+#define DEFAULT_PORT 6030
+#define DEFAULT_IFACE INTERFACE_BUT
+#define DEFAULT_DATABASE "test"
+#define DEFAULT_REPLICA 1
+#define DEFAULT_TB_PREFIX "d"
+#define DEFAULT_ESCAPE_CHAR false
+#define DEFAULT_USE_METRIC true
+#define DEFAULT_DROP_DB true
+#define DEFAULT_AGGR_FUNC false
+#define DEFAULT_DEBUG false
+#define DEFAULT_VERBOSE false
+#define DEFAULT_PERF_STAT false
+#define DEFAULT_ANS_YES false
+#define DEFAULT_OUTPUT "./output.txt"
+#define DEFAULT_SYNC_MODE 0
+#define DEFAULT_DATA_TYPE {TSDB_DATA_TYPE_FLOAT,TSDB_DATA_TYPE_INT,TSDB_DATA_TYPE_FLOAT}
+#define DEFAULT_DATATYPE {"FLOAT","INT","FLOAT"}
+#define DEFAULT_BINWIDTH 64
+#define DEFAULT_COL_COUNT 4
+#define DEFAULT_LEN_ONE_ROW 76
+#define DEFAULT_INSERT_INTERVAL 0
+#define DEFAULT_QUERY_TIME 1
+#define DEFAULT_PREPARED_RAND 10000
+#define DEFAULT_REQ_PER_REQ 30000
+#define DEFAULT_INSERT_ROWS 10000
+#define DEFAULT_ABORT 0
+#define DEFAULT_RATIO 0
+#define DEFAULT_DISORDER_RANGE 1000
+#define DEFAULT_METHOD_DEL 1
+#define DEFAULT_TOTAL_INSERT 0
+#define DEFAULT_TOTAL_AFFECT 0
+#define DEFAULT_DEMO_MODE true
+#define DEFAULT_CREATE_BATCH 10
+#define DEFAULT_SUB_INTERVAL 10000
+#define DEFAULT_QUERY_INTERVAL 10000
 #define STMT_BIND_PARAM_BATCH 1
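One hedged aside on the new macros, not a defect in the commit as used: PRINT_STAT_INTERVAL expands to the unparenthesized expression 30*1000. Every use in this diff has the form `a - b > PRINT_STAT_INTERVAL`, and `*` binds tighter than `>`, so behavior is correct; but if such a macro were ever used as, say, a divisor, the expansion would regroup. A compile-time illustration (C11; the macro names here are illustrative, not from the commit):

    #define INTERVAL_UNSAFE 30*1000     /* like PRINT_STAT_INTERVAL    */
    #define INTERVAL_SAFE   (30*1000)   /* fully parenthesized variant */

    /* 60000 / INTERVAL_UNSAFE parses as (60000 / 30) * 1000 */
    _Static_assert(60000 / INTERVAL_UNSAFE == 2000000, "regrouped");
    _Static_assert(60000 / INTERVAL_SAFE   == 2,       "as intended");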
@@ -467,7 +511,7 @@ typedef struct SThreadInfo_S {
     int threadID;
     char db_name[TSDB_DB_NAME_LEN];
     uint32_t time_precision;
-    char filePath[4096];
+    char filePath[TSDB_FILENAME_LEN];
     FILE *fp;
     char tb_prefix[TSDB_TABLE_NAME_LEN];
     uint64_t start_table_from;
@@ -624,63 +668,49 @@ char *g_aggreFunc[] = {"*", "count(*)", "avg(C0)", "sum(C0)",
         "max(C0)", "min(C0)", "first(C0)", "last(C0)"};
 SArguments g_args = {
-    NULL,            // metaFile
-    0,               // test_mode
-    "localhost",     // host
-    6030,            // port
-    INTERFACE_BUT,   // iface
-    "root",          // user
-#ifdef _TD_POWER_
-    "powerdb",       // password
-#elif (_TD_TQ_ == true)
-    "tqueue",        // password
-#elif (_TD_PRO_ == true)
-    "prodb",         // password
-#else
-    "taosdata",      // password
-#endif
-    "test",          // database
-    1,               // replica
-    "d",             // tb_prefix
-    false,           // escapeChar
-    NULL,            // sqlFile
-    true,            // use_metric
-    true,            // drop_database
-    false,           // aggr_func
-    false,           // debug_print
-    false,           // verbose_print
-    false,           // performance statistic print
-    false,           // answer_yes;
-    "./output.txt",  // output_file
-    0,               // mode : sync or async
-    {TSDB_DATA_TYPE_FLOAT,
-    TSDB_DATA_TYPE_INT,
-    TSDB_DATA_TYPE_FLOAT},
-    {
-    "FLOAT",         // dataType
-    "INT",           // dataType
-    "FLOAT",         // dataType. demo mode has 3 columns
-    },
-    64,              // binwidth
-    4,               // columnCount, timestamp + float + int + float
-    20 + FLOAT_BUFF_LEN + INT_BUFF_LEN + FLOAT_BUFF_LEN, // lenOfOneRow
-    DEFAULT_NTHREADS,// nthreads
-    0,               // insert_interval
-    DEFAULT_TIMESTAMP_STEP, // timestamp_step
-    1,               // query_times
-    10000,           // prepared_rand
-    DEFAULT_INTERLACE_ROWS, // interlaceRows;
-    30000,           // reqPerReq
-    (1024*1024),     // max_sql_len
-    DEFAULT_CHILDTABLES, // ntables
-    10000,           // insertRows
-    0,               // abort
-    0,               // disorderRatio
-    1000,            // disorderRange
-    1,               // method_of_delete
-    0,               // totalInsertRows;
-    0,               // totalAffectedRows;
-    true,            // demo_mode;
+    DEFAULT_METAFILE,        // metaFile
+    DEFAULT_TEST_MODE,       // test_mode
+    DEFAULT_HOST,            // host
+    DEFAULT_PORT,            // port
+    DEFAULT_IFACE,           // iface
+    TSDB_DEFAULT_USER,       // user
+    TSDB_DEFAULT_PASS,       // password
+    DEFAULT_DATABASE,        // database
+    DEFAULT_REPLICA,         // replica
+    DEFAULT_TB_PREFIX,       // tb_prefix
+    DEFAULT_ESCAPE_CHAR,     // escapeChar
+    DEFAULT_SQLFILE,         // sqlFile
+    DEFAULT_USE_METRIC,      // use_metric
+    DEFAULT_DROP_DB,         // drop_database
+    DEFAULT_AGGR_FUNC,       // aggr_func
+    DEFAULT_DEBUG,           // debug_print
+    DEFAULT_VERBOSE,         // verbose_print
+    DEFAULT_PERF_STAT,       // performance statistic print
+    DEFAULT_ANS_YES,         // answer_yes;
+    DEFAULT_OUTPUT,          // output_file
+    DEFAULT_SYNC_MODE,       // mode : sync or async
+    DEFAULT_DATA_TYPE,       // data_type
+    DEFAULT_DATATYPE,        // dataType
+    DEFAULT_BINWIDTH,        // binwidth
+    DEFAULT_COL_COUNT,       // columnCount, timestamp + float + int + float
+    DEFAULT_LEN_ONE_ROW,     // lenOfOneRow
+    DEFAULT_NTHREADS,        // nthreads
+    DEFAULT_INSERT_INTERVAL, // insert_interval
+    DEFAULT_TIMESTAMP_STEP,  // timestamp_step
+    DEFAULT_QUERY_TIME,      // query_times
+    DEFAULT_PREPARED_RAND,   // prepared_rand
+    DEFAULT_INTERLACE_ROWS,  // interlaceRows;
+    DEFAULT_REQ_PER_REQ,     // reqPerReq
+    TSDB_MAX_ALLOWED_SQL_LEN, // max_sql_len
+    DEFAULT_CHILDTABLES,     // ntables
+    DEFAULT_INSERT_ROWS,     // insertRows
+    DEFAULT_ABORT,           // abort
+    DEFAULT_RATIO,           // disorderRatio
+    DEFAULT_DISORDER_RANGE,  // disorderRange
+    DEFAULT_METHOD_DEL,      // method_of_delete
+    DEFAULT_TOTAL_INSERT,    // totalInsertRows;
+    DEFAULT_TOTAL_AFFECT,    // totalAffectedRows;
+    DEFAULT_DEMO_MODE,       // demo_mode;
 };
 static SDbs g_Dbs;
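The rewritten initializer is still positional: each value lines up with a struct field only by order, and the `// metaFile`-style comments are the sole record of which is which. A sketch of the C99 designated-initializer alternative, which binds by name; the field names below are taken from the comments and are assumptions about the actual SArguments layout:

    /* sketch only: assumes SArguments has fields with these names */
    SArguments g_args_sketch = {
        .metaFile = DEFAULT_METAFILE,
        .host     = DEFAULT_HOST,
        .port     = DEFAULT_PORT,
        .user     = TSDB_DEFAULT_USER,
        .password = TSDB_DEFAULT_PASS,
        .database = DEFAULT_DATABASE,
        .replica  = DEFAULT_REPLICA,
        /* fields not named here are zero-initialized */
    };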
@@ -735,7 +765,7 @@ static FILE * g_fpOfInsertResult = NULL;
 ///////////////////////////////////////////////////
-static void ERROR_EXIT(const char *msg) { errorPrint("%s", msg); exit(-1); }
+static void ERROR_EXIT(const char *msg) { errorPrint("%s", msg); exit(EXIT_FAILURE); }
 #ifndef TAOSDEMO_COMMIT_SHA1
 #define TAOSDEMO_COMMIT_SHA1 "unknown"
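On the ERROR_EXIT change: exit(-1) is legal C but unportable as a status report, since on POSIX the parent observes only the low 8 bits of the status, so -1 arrives as 255. EXIT_FAILURE, declared in <stdlib.h>, is the standard's portable "unsuccessful termination" code, which is what the commit switches to:

    #include <stdlib.h>

    int main(void) {
        exit(EXIT_FAILURE);  /* portable nonzero status; exit(-1) would
                                surface as 255 via WEXITSTATUS on POSIX */
    }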
@@ -2010,7 +2040,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
     }
     g_args.columnCount = columnCount;
-    g_args.lenOfOneRow = 20; // timestamp
+    g_args.lenOfOneRow = TIMESTAMP_BUFF_LEN; // timestamp
     for (int c = 0; c < g_args.columnCount; c++) {
         switch(g_args.data_type[c]) {
             case TSDB_DATA_TYPE_BINARY:
@@ -2172,7 +2202,7 @@ static void fetchResult(TAOS_RES *res, threadInfo* pThreadInfo) {
     int num_fields = taos_field_count(res);
     TAOS_FIELD *fields = taos_fetch_fields(res);
-    char* databuf = (char*) calloc(1, 100*1024*1024);
+    char* databuf = (char*) calloc(1, FETCH_BUFFER_SIZE);
     if (databuf == NULL) {
         errorPrint2("%s() LN%d, failed to malloc, warning: save result to file slowly!\n",
                 __func__, __LINE__);
@@ -2183,11 +2213,11 @@ static void fetchResult(TAOS_RES *res, threadInfo* pThreadInfo) {
     // fetch the records row by row
     while((row = taos_fetch_row(res))) {
-        if (totalLen >= (100*1024*1024 - HEAD_BUFF_LEN*2)) {
+        if (totalLen >= (FETCH_BUFFER_SIZE - HEAD_BUFF_LEN*2)) {
             if (strlen(pThreadInfo->filePath) > 0)
                 appendResultBufToFile(databuf, pThreadInfo);
             totalLen = 0;
-            memset(databuf, 0, 100*1024*1024);
+            memset(databuf, 0, FETCH_BUFFER_SIZE);
         }
         num_rows++;
         char temp[HEAD_BUFF_LEN] = {0};
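Worth noting that this substitution is value-preserving: FETCH_BUFFER_SIZE is defined above as 100 * TSDB_MAX_ALLOWED_SQL_LEN, and TSDB_MAX_ALLOWED_SQL_LEN is 1048576 (1 MiB) in this codebase, so the product equals the old 100*1024*1024 literal exactly. A compile-time guard one could add (C11; the TSDB_MAX_ALLOWED_SQL_LEN value is an assumption to verify against taosdef.h):

    #define TSDB_MAX_ALLOWED_SQL_LEN 1048576                  /* assumed */
    #define FETCH_BUFFER_SIZE (100 * TSDB_MAX_ALLOWED_SQL_LEN)

    _Static_assert(FETCH_BUFFER_SIZE == 100 * 1024 * 1024,
                   "named constant still equals the old magic number");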
@@ -2249,7 +2279,7 @@ static int32_t rand_bool() {
     static int cursor;
     cursor++;
     if (cursor > (g_args.prepared_rand - 1)) cursor = 0;
-    return g_randint[cursor % g_args.prepared_rand] % 2;
+    return g_randint[cursor % g_args.prepared_rand] % TSDB_DATA_BOOL_NULL;
 }
 static char *rand_tinyint_str()
@@ -2266,7 +2296,7 @@ static int32_t rand_tinyint()
     static int cursor;
     cursor++;
     if (cursor > (g_args.prepared_rand - 1)) cursor = 0;
-    return g_randint[cursor % g_args.prepared_rand] % 128;
+    return g_randint[cursor % g_args.prepared_rand] % TSDB_DATA_TINYINT_NULL;
 }
 static char *rand_utinyint_str()
@@ -2283,7 +2313,7 @@ static int32_t rand_utinyint()
     static int cursor;
     cursor++;
     if (cursor > (g_args.prepared_rand - 1)) cursor = 0;
-    return g_randuint[cursor % g_args.prepared_rand] % 255;
+    return g_randuint[cursor % g_args.prepared_rand] % TSDB_DATA_UTINYINT_NULL;
 }
 static char *rand_smallint_str()
@@ -2300,7 +2330,7 @@ static int32_t rand_smallint()
     static int cursor;
     cursor++;
     if (cursor > (g_args.prepared_rand - 1)) cursor = 0;
-    return g_randint[cursor % g_args.prepared_rand] % 32768;
+    return g_randint[cursor % g_args.prepared_rand] % TSDB_DATA_SMALLINT_NULL;
 }
 static char *rand_usmallint_str()
@@ -2317,7 +2347,7 @@ static int32_t rand_usmallint()
     static int cursor;
     cursor++;
     if (cursor > (g_args.prepared_rand - 1)) cursor = 0;
-    return g_randuint[cursor % g_args.prepared_rand] % 65535;
+    return g_randuint[cursor % g_args.prepared_rand] % TSDB_DATA_USMALLINT_NULL;
 }
 static char *rand_int_str()
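The rand_* changes read oddly at first: a NULL-sentinel macro used as a modulus. They are value-preserving because TDengine's taosdef.h defines each sentinel as exactly the numeric bound the old literal spelled out, e.g. TSDB_DATA_TINYINT_NULL is 0x80 (= 128) and TSDB_DATA_USMALLINT_NULL is 0xFFFF (= 65535). A sketch of the equivalence; the sentinel values are quoted from memory of taosdef.h and should be verified against the header:

    #define TSDB_DATA_BOOL_NULL      0x02     /* == 2,     was % 2     */
    #define TSDB_DATA_TINYINT_NULL   0x80     /* == 128,   was % 128   */
    #define TSDB_DATA_UTINYINT_NULL  0xFF     /* == 255,   was % 255   */
    #define TSDB_DATA_SMALLINT_NULL  0x8000   /* == 32768, was % 32768 */
    #define TSDB_DATA_USMALLINT_NULL 0xFFFF   /* == 65535, was % 65535 */

    _Static_assert(TSDB_DATA_TINYINT_NULL == 128,
                   "x % TSDB_DATA_TINYINT_NULL behaves exactly like x % 128");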
@@ -3366,7 +3396,7 @@ static void printfDbInfoForQueryToFile(
 static void printfQuerySystemInfo(TAOS * taos) {
     char filename[MAX_FILE_NAME_LEN] = {0};
-    char buffer[1024] = {0};
+    char buffer[SQL_BUFF_LEN] = {0};
     TAOS_RES* res;
     time_t t;
@@ -3405,12 +3435,12 @@ static void printfQuerySystemInfo(TAOS * taos) {
         printfDbInfoForQueryToFile(filename, dbInfos[i], i);
         // show db.vgroups
-        snprintf(buffer, 1024, "show %s.vgroups;", dbInfos[i]->name);
+        snprintf(buffer, SQL_BUFF_LEN, "show %s.vgroups;", dbInfos[i]->name);
         res = taos_query(taos, buffer);
         xDumpResultToFile(filename, res);
         // show db.stables
-        snprintf(buffer, 1024, "show %s.stables;", dbInfos[i]->name);
+        snprintf(buffer, SQL_BUFF_LEN, "show %s.stables;", dbInfos[i]->name);
         res = taos_query(taos, buffer);
         xDumpResultToFile(filename, res);
         free(dbInfos[i]);
@@ -3753,7 +3783,7 @@ static int calcRowLen(SSuperTable* superTbls) {
         }
     }
-    superTbls->lenOfOneRow = lenOfOneRow + 20; // timestamp
+    superTbls->lenOfOneRow = lenOfOneRow + TIMESTAMP_BUFF_LEN; // timestamp
     int tagIndex;
     int lenOfTagOfOneRow = 0;
@@ -3807,7 +3837,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
         char* dbName, char* stbName, char** childTblNameOfSuperTbl,
         int64_t* childTblCountOfSuperTbl, int64_t limit, uint64_t offset, bool escapChar) {
-    char command[1024] = "\0";
+    char command[SQL_BUFF_LEN] = "\0";
     char limitBuf[100] = "\0";
     TAOS_RES * res;
@@ -3819,7 +3849,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
             limit, offset);
     //get all child table name use cmd: select tbname from superTblName;
-    snprintf(command, 1024, escapChar ? "select tbname from %s.`%s` %s" :
+    snprintf(command, SQL_BUFF_LEN, escapChar ? "select tbname from %s.`%s` %s" :
             "select tbname from %s.%s %s", dbName, stbName, limitBuf);
     res = taos_query(taos, command);
@@ -3832,7 +3862,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
         exit(EXIT_FAILURE);
     }
-    int64_t childTblCount = (limit < 0)?10000:limit;
+    int64_t childTblCount = (limit < 0)?DEFAULT_CHILDTABLES:limit;
     int64_t count = 0;
     if (childTblName == NULL) {
         childTblName = (char*)calloc(1, childTblCount * TSDB_TABLE_NAME_LEN);
@@ -3897,13 +3927,13 @@ static int getAllChildNameOfSuperTable(TAOS * taos, char* dbName,
 static int getSuperTableFromServer(TAOS * taos, char* dbName,
         SSuperTable* superTbls) {
-    char command[1024] = "\0";
+    char command[SQL_BUFF_LEN] = "\0";
     TAOS_RES * res;
     TAOS_ROW row = NULL;
     int count = 0;
     //get schema use cmd: describe superTblName;
-    snprintf(command, 1024, "describe %s.%s", dbName, superTbls->stbName);
+    snprintf(command, SQL_BUFF_LEN, "describe %s.%s", dbName, superTbls->stbName);
     res = taos_query(taos, command);
     int32_t code = taos_errno(res);
     if (code != 0) {
@@ -4233,10 +4263,10 @@ static int createSuperTable(
         }
     }
-    superTbl->lenOfOneRow = lenOfOneRow + 20; // timestamp
+    superTbl->lenOfOneRow = lenOfOneRow + TIMESTAMP_BUFF_LEN; // timestamp
     // save for creating child table
-    superTbl->colsOfCreateChildTable = (char*)calloc(len+20, 1);
+    superTbl->colsOfCreateChildTable = (char*)calloc(len+TIMESTAMP_BUFF_LEN, 1);
     if (NULL == superTbl->colsOfCreateChildTable) {
         taos_close(taos);
         free(command);
@@ -4245,7 +4275,7 @@ static int createSuperTable(
         exit(EXIT_FAILURE);
     }
-    snprintf(superTbl->colsOfCreateChildTable, len+20, "(ts timestamp%s)", cols);
+    snprintf(superTbl->colsOfCreateChildTable, len+TIMESTAMP_BUFF_LEN, "(ts timestamp%s)", cols);
     verbosePrint("%s() LN%d: %s\n",
             __func__, __LINE__, superTbl->colsOfCreateChildTable);
@@ -4592,7 +4622,7 @@ static void* createTable(void *sarg)
             batchNum++;
             if ((batchNum < stbInfo->batchCreateTableNum)
                     && ((buff_len - len)
-                        >= (stbInfo->lenOfTagOfOneRow + 256))) {
+                        >= (stbInfo->lenOfTagOfOneRow + EXTRA_SQL_LEN))) {
                 continue;
             }
         }
@@ -4608,7 +4638,7 @@ static void* createTable(void *sarg)
         }
         pThreadInfo->tables_created += batchNum;
         uint64_t currentPrintTime = taosGetTimestampMs();
-        if (currentPrintTime - lastPrintTime > 30*1000) {
+        if (currentPrintTime - lastPrintTime > PRINT_STAT_INTERVAL) {
            printf("thread[%d] already create %"PRIu64" - %"PRIu64" tables\n",
                    pThreadInfo->threadID, pThreadInfo->start_table_from, i);
            lastPrintTime = currentPrintTime;
@@ -4780,7 +4810,7 @@ static int readTagFromCsvFileToMem(SSuperTable * stbInfo) {
         stbInfo->tagDataBuf = NULL;
     }
-    int tagCount = 10000;
+    int tagCount = MAX_SAMPLES;
     int count = 0;
     char* tagDataBuf = calloc(1, stbInfo->lenOfTagOfOneRow * tagCount);
     if (tagDataBuf == NULL) {
@@ -5186,35 +5216,35 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
     if (port && port->type == cJSON_Number) {
         g_Dbs.port = port->valueint;
     } else if (!port) {
-        g_Dbs.port = 6030;
+        g_Dbs.port = DEFAULT_PORT;
     }
     cJSON* user = cJSON_GetObjectItem(root, "user");
     if (user && user->type == cJSON_String && user->valuestring != NULL) {
         tstrncpy(g_Dbs.user, user->valuestring, MAX_USERNAME_SIZE);
     } else if (!user) {
-        tstrncpy(g_Dbs.user, "root", MAX_USERNAME_SIZE);
+        tstrncpy(g_Dbs.user, TSDB_DEFAULT_USER, MAX_USERNAME_SIZE);
     }
     cJSON* password = cJSON_GetObjectItem(root, "password");
     if (password && password->type == cJSON_String && password->valuestring != NULL) {
         tstrncpy(g_Dbs.password, password->valuestring, SHELL_MAX_PASSWORD_LEN);
     } else if (!password) {
-        tstrncpy(g_Dbs.password, "taosdata", SHELL_MAX_PASSWORD_LEN);
+        tstrncpy(g_Dbs.password, TSDB_DEFAULT_PASS, SHELL_MAX_PASSWORD_LEN);
     }
     cJSON* resultfile = cJSON_GetObjectItem(root, "result_file");
     if (resultfile && resultfile->type == cJSON_String && resultfile->valuestring != NULL) {
         tstrncpy(g_Dbs.resultFile, resultfile->valuestring, MAX_FILE_NAME_LEN);
     } else if (!resultfile) {
-        tstrncpy(g_Dbs.resultFile, "./insert_res.txt", MAX_FILE_NAME_LEN);
+        tstrncpy(g_Dbs.resultFile, DEFAULT_OUTPUT, MAX_FILE_NAME_LEN);
     }
     cJSON* threads = cJSON_GetObjectItem(root, "thread_count");
     if (threads && threads->type == cJSON_Number) {
         g_Dbs.threadCount = threads->valueint;
     } else if (!threads) {
-        g_Dbs.threadCount = 1;
+        g_Dbs.threadCount = DEFAULT_NTHREADS;
     } else {
         errorPrint("%s", "failed to read json, threads not found\n");
         goto PARSE_OVER;
@@ -5224,7 +5254,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
     if (threads2 && threads2->type == cJSON_Number) {
         g_Dbs.threadCountForCreateTbl = threads2->valueint;
     } else if (!threads2) {
-        g_Dbs.threadCountForCreateTbl = 1;
+        g_Dbs.threadCountForCreateTbl = DEFAULT_NTHREADS;
     } else {
         errorPrint("%s", "failed to read json, threads2 not found\n");
         goto PARSE_OVER;
@@ -5238,7 +5268,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
         }
         g_args.insert_interval = gInsertInterval->valueint;
     } else if (!gInsertInterval) {
-        g_args.insert_interval = 0;
+        g_args.insert_interval = DEFAULT_INSERT_INTERVAL;
     } else {
         errorPrint("%s", "failed to read json, insert_interval input mistake\n");
         goto PARSE_OVER;
@@ -5253,7 +5283,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
         }
         g_args.interlaceRows = interlaceRows->valueint;
     } else if (!interlaceRows) {
-        g_args.interlaceRows = 0; // 0 means progressive mode, > 0 mean interlace mode. max value is less or equ num_of_records_per_req
+        g_args.interlaceRows = DEFAULT_INTERLACE_ROWS; // 0 means progressive mode, > 0 mean interlace mode. max value is less or equ num_of_records_per_req
     } else {
         errorPrint("%s", "failed to read json, interlaceRows input mistake\n");
         goto PARSE_OVER;
@@ -5268,7 +5298,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
         }
         g_args.max_sql_len = maxSqlLen->valueint;
     } else if (!maxSqlLen) {
-        g_args.max_sql_len = (1024*1024);
+        g_args.max_sql_len = TSDB_MAX_ALLOWED_SQL_LEN;
     } else {
         errorPrint("%s() LN%d, failed to read json, max_sql_len input mistake\n",
                 __func__, __LINE__);
@@ -5307,7 +5337,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
         }
         g_args.prepared_rand = prepareRand->valueint;
     } else if (!prepareRand) {
-        g_args.prepared_rand = 10000;
+        g_args.prepared_rand = DEFAULT_PREPARED_RAND;
     } else {
         errorPrint("%s() LN%d, failed to read json, prepared_rand not found\n",
                 __func__, __LINE__);
@@ -5323,7 +5353,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
         } else if (0 == strncasecmp(answerPrompt->valuestring, "no", 2)) {
             g_args.answer_yes = true;
         } else {
-            g_args.answer_yes = false;
+            g_args.answer_yes = DEFAULT_ANS_YES;
         }
     } else if (!answerPrompt) {
         g_args.answer_yes = true; // default is no, mean answer_yes.
@@ -5622,7 +5652,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
     if (batchCreateTbl && batchCreateTbl->type == cJSON_Number) {
         g_Dbs.db[i].superTbls[j].batchCreateTableNum = batchCreateTbl->valueint;
     } else if (!batchCreateTbl) {
-        g_Dbs.db[i].superTbls[j].batchCreateTableNum = 10;
+        g_Dbs.db[i].superTbls[j].batchCreateTableNum = DEFAULT_CREATE_BATCH;
     } else {
         errorPrint("%s", "failed to read json, batch_create_tbl_num not found\n");
         goto PARSE_OVER;
@@ -5904,7 +5934,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
     if (disorderRange && disorderRange->type == cJSON_Number) {
         g_Dbs.db[i].superTbls[j].disorderRange = disorderRange->valueint;
     } else if (!disorderRange) {
-        g_Dbs.db[i].superTbls[j].disorderRange = 1000;
+        g_Dbs.db[i].superTbls[j].disorderRange = DEFAULT_DISORDER_RANGE;
     } else {
         errorPrint("%s", "failed to read json, disorderRange not found\n");
         goto PARSE_OVER;
@@ -5952,7 +5982,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
     if (host && host->type == cJSON_String && host->valuestring != NULL) {
         tstrncpy(g_queryInfo.host, host->valuestring, MAX_HOSTNAME_SIZE);
     } else if (!host) {
-        tstrncpy(g_queryInfo.host, "127.0.0.1", MAX_HOSTNAME_SIZE);
+        tstrncpy(g_queryInfo.host, DEFAULT_HOST, MAX_HOSTNAME_SIZE);
     } else {
         errorPrint("%s", "failed to read json, host not found\n");
         goto PARSE_OVER;
@@ -5962,21 +5992,21 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
     if (port && port->type == cJSON_Number) {
         g_queryInfo.port = port->valueint;
     } else if (!port) {
-        g_queryInfo.port = 6030;
+        g_queryInfo.port = DEFAULT_PORT;
     }
     cJSON* user = cJSON_GetObjectItem(root, "user");
     if (user && user->type == cJSON_String && user->valuestring != NULL) {
         tstrncpy(g_queryInfo.user, user->valuestring, MAX_USERNAME_SIZE);
     } else if (!user) {
-        tstrncpy(g_queryInfo.user, "root", MAX_USERNAME_SIZE); ;
+        tstrncpy(g_queryInfo.user, TSDB_DEFAULT_USER, MAX_USERNAME_SIZE); ;
     }
     cJSON* password = cJSON_GetObjectItem(root, "password");
     if (password && password->type == cJSON_String && password->valuestring != NULL) {
         tstrncpy(g_queryInfo.password, password->valuestring, SHELL_MAX_PASSWORD_LEN);
     } else if (!password) {
-        tstrncpy(g_queryInfo.password, "taosdata", SHELL_MAX_PASSWORD_LEN);;
+        tstrncpy(g_queryInfo.password, TSDB_DEFAULT_PASS, SHELL_MAX_PASSWORD_LEN);;
     }
     cJSON *answerPrompt = cJSON_GetObjectItem(root, "confirm_parameter_prompt"); // yes, no,
@@ -6004,7 +6034,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
         }
         g_args.query_times = gQueryTimes->valueint;
     } else if (!gQueryTimes) {
-        g_args.query_times = 1;
+        g_args.query_times = DEFAULT_QUERY_TIME;
     } else {
         errorPrint("%s", "failed to read json, query_times input mistake\n");
         goto PARSE_OVER;
@@ -6102,7 +6132,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
     } else if (!interval) {
         //printf("failed to read json, subscribe interval no found\n");
         //goto PARSE_OVER;
-        g_queryInfo.specifiedQueryInfo.subscribeInterval = 10000;
+        g_queryInfo.specifiedQueryInfo.subscribeInterval = DEFAULT_SUB_INTERVAL;
     }
     cJSON* restart = cJSON_GetObjectItem(specifiedQuery, "restart");
@@ -6249,7 +6279,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
         }
         g_queryInfo.superQueryInfo.threadCnt = threads->valueint;
     } else if (!threads) {
-        g_queryInfo.superQueryInfo.threadCnt = 1;
+        g_queryInfo.superQueryInfo.threadCnt = DEFAULT_NTHREADS;
     }
     //cJSON* subTblCnt = cJSON_GetObjectItem(superQuery, "childtable_count");
@@ -6294,7 +6324,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
     } else if (!superInterval) {
         //printf("failed to read json, subscribe interval no found\n");
         //goto PARSE_OVER;
-        g_queryInfo.superQueryInfo.subscribeInterval = 10000;
+        g_queryInfo.superQueryInfo.subscribeInterval = DEFAULT_QUERY_INTERVAL;
     }
     cJSON* subrestart = cJSON_GetObjectItem(superQuery, "restart");
@@ -6414,7 +6444,7 @@ static bool getInfoFromJsonFile(char* file) {
     }
     bool ret = false;
-    int maxLen = 6400000;
+    int maxLen = MAX_JSON_BUFF;
     char *content = calloc(1, maxLen + 1);
     int len = fread(content, 1, maxLen, fp);
     if (len <= 0) {