Unverified commit d004a035, authored by sangshuduo, committed by GitHub

Merge pull request #5298 from taosdata/hotfix/sangshuduo/TD-2183-fix-coverity-scan-issue

[TD-2183] <fix>: fix coverity scan issue in taosdemo and taosdump.
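Most of the hunks below replace `strncpy` with TDengine's `tstrncpy` helper. `strncpy` leaves the destination without a terminating NUL whenever the source fills the buffer, which is the string-handling defect class Coverity reports against taosdemo and taosdump. The remaining hunks close file handles and free buffers on early-return paths, guard the average-delay division against a zero count, and widen the query rate before a multiplication that could overflow; short sketches of those patterns appear after the corresponding hunks below. First, a minimal sketch of a bounded copy that always terminates (an illustrative approximation only; the real `tstrncpy` is defined in TDengine's utility headers and may differ in detail):

```c
#include <string.h>

/* Illustrative bounded copy: like strncpy, but the destination is
 * always NUL-terminated. Assumption: TDengine's tstrncpy follows this
 * pattern; its exact definition is not shown in this diff. */
static void bounded_copy(char *dst, const char *src, size_t size) {
    if (size == 0) return;         /* no room even for the terminator */
    strncpy(dst, src, size - 1);   /* copy at most size-1 bytes */
    dst[size - 1] = '\0';          /* force termination */
}
```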
......@@ -67,6 +67,7 @@
#ifndef ENABLE_VIRTUAL_TERMINAL_PROCESSING
#define ENABLE_VIRTUAL_TERMINAL_PROCESSING 0x0004
#endif
static HANDLE g_stdoutHandle;
static DWORD g_consoleMode;
......@@ -590,6 +591,8 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
break;
case 'x':
arguments->insert_only = true;
break;
case 'y':
arguments->answer_yes = true;
break;
......@@ -807,6 +810,7 @@ static void getResult(TAOS_RES *res, char* resultFileName) {
char* databuf = (char*) calloc(1, 100*1024*1024);
if (databuf == NULL) {
fprintf(stderr, "failed to malloc, warning: save result to file slowly!\n");
fclose(fp);
return ;
}
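This hunk addresses a resource-leak finding: `getResult` has already opened the result file when the 100 MB buffer allocation fails, so the early return now closes it first. A hedged sketch of the error path (the earlier `fopen` of `fp` is assumed from the function's surrounding code, which the hunk does not show; names follow the hunk):

```c
#include <stdio.h>
#include <stdlib.h>

/* Sketch of the error-path fix only; the rest of getResult() is abridged. */
static void save_result_sketch(const char *resultFileName) {
    FILE *fp = fopen(resultFileName, "at");
    if (fp == NULL) return;

    char *databuf = (char *)calloc(1, 100 * 1024 * 1024);
    if (databuf == NULL) {
        fprintf(stderr, "failed to malloc, warning: save result to file slowly!\n");
        fclose(fp);          /* the added call: release the handle before returning */
        return;
    }

    /* ... fetch rows into databuf and write them out ... */
    free(databuf);
    fclose(fp);
}
```

The same discipline shows up later in the diff: `printfQuerySystemInfo` frees `dbInfos` before its early return, and `readTable`/`readMetric` close their output file before returning on a failed query.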
......@@ -1396,7 +1400,7 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) {
return -1;
}
strncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], fields[TSDB_SHOW_DB_NAME_INDEX].bytes);
tstrncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], fields[TSDB_SHOW_DB_NAME_INDEX].bytes);
xFormatTimestamp(dbInfos[count]->create_time, *(int64_t*)row[TSDB_SHOW_DB_CREATED_TIME_INDEX], TSDB_TIME_PRECISION_MILLI);
dbInfos[count]->ntables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]);
dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]);
......@@ -1404,7 +1408,7 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) {
dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]);
dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]);
strncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX], fields[TSDB_SHOW_DB_KEEP_INDEX].bytes);
tstrncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX], fields[TSDB_SHOW_DB_KEEP_INDEX].bytes);
dbInfos[count]->cache = *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]);
dbInfos[count]->blocks = *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]);
dbInfos[count]->minrows = *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]);
......@@ -1414,9 +1418,9 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) {
dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX]));
dbInfos[count]->cachelast = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX]));
strncpy(dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes);
tstrncpy(dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes);
dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]);
strncpy(dbInfos[count]->status, (char *)row[TSDB_SHOW_DB_STATUS_INDEX], fields[TSDB_SHOW_DB_STATUS_INDEX].bytes);
tstrncpy(dbInfos[count]->status, (char *)row[TSDB_SHOW_DB_STATUS_INDEX], fields[TSDB_SHOW_DB_STATUS_INDEX].bytes);
count++;
if (count > MAX_DATABASE_COUNT) {
......@@ -1429,14 +1433,14 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) {
}
static void printfDbInfoForQueryToFile(char* filename, SDbInfo* dbInfos, int index) {
FILE *fp = NULL;
if (filename[0] != 0) {
fp = fopen(filename, "at");
if (filename[0] == 0)
return;
FILE *fp = fopen(filename, "at");
if (fp == NULL) {
fprintf(stderr, "failed to open file: %s\n", filename);
return;
}
}
fprintf(fp, "================ database[%d] ================\n", index);
fprintf(fp, "name: %s\n", dbInfos->name);
......@@ -1492,7 +1496,10 @@ static void printfQuerySystemInfo(TAOS * taos) {
return;
}
int dbCount = getDbFromServer(taos, dbInfos);
if (dbCount <= 0) return;
if (dbCount <= 0) {
free(dbInfos);
return;
}
for (int i = 0; i < dbCount; i++) {
// printf database info
......@@ -1829,7 +1836,7 @@ static int getAllChildNameOfSuperTable(TAOS * taos, char* dbName, char* sTblName
char* pTblName = childTblName;
while ((row = taos_fetch_row(res)) != NULL) {
int32_t* len = taos_fetch_lengths(res);
strncpy(pTblName, (char *)row[0], len[0]);
tstrncpy(pTblName, (char *)row[0], len[0]);
//printf("==== sub table name: %s\n", pTblName);
count++;
if (count >= childTblCount - 1) {
......@@ -1883,16 +1890,16 @@ static int getSuperTableFromServer(TAOS * taos, char* dbName, SSuperTable* supe
}
if (strcmp((char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], "TAG") == 0) {
strncpy(superTbls->tags[tagIndex].field, (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes);
strncpy(superTbls->tags[tagIndex].dataType, (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes);
tstrncpy(superTbls->tags[tagIndex].field, (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes);
tstrncpy(superTbls->tags[tagIndex].dataType, (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes);
superTbls->tags[tagIndex].dataLen = *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
strncpy(superTbls->tags[tagIndex].note, (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes);
tstrncpy(superTbls->tags[tagIndex].note, (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes);
tagIndex++;
} else {
strncpy(superTbls->columns[columnIndex].field, (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes);
strncpy(superTbls->columns[columnIndex].dataType, (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes);
tstrncpy(superTbls->columns[columnIndex].field, (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes);
tstrncpy(superTbls->columns[columnIndex].dataType, (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes);
superTbls->columns[columnIndex].dataLen = *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
strncpy(superTbls->columns[columnIndex].note, (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes);
tstrncpy(superTbls->columns[columnIndex].note, (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes);
columnIndex++;
}
count++;
......@@ -2449,8 +2456,8 @@ static bool getColumnAndTagTypeFromInsertJsonFile(cJSON* stbInfo, SSuperTable* s
printf("failed to read json, column type not found");
goto PARSE_OVER;
}
//strncpy(superTbls->columns[k].dataType, dataType->valuestring, MAX_TB_NAME_SIZE);
strncpy(columnCase.dataType, dataType->valuestring, MAX_TB_NAME_SIZE);
//tstrncpy(superTbls->columns[k].dataType, dataType->valuestring, MAX_TB_NAME_SIZE);
tstrncpy(columnCase.dataType, dataType->valuestring, MAX_TB_NAME_SIZE);
cJSON* dataLen = cJSON_GetObjectItem(column, "len");
if (dataLen && dataLen->type == cJSON_Number) {
......@@ -2463,7 +2470,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(cJSON* stbInfo, SSuperTable* s
}
for (int n = 0; n < count; ++n) {
strncpy(superTbls->columns[index].dataType, columnCase.dataType, MAX_TB_NAME_SIZE);
tstrncpy(superTbls->columns[index].dataType, columnCase.dataType, MAX_TB_NAME_SIZE);
superTbls->columns[index].dataLen = columnCase.dataLen;
index++;
}
......@@ -2508,7 +2515,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(cJSON* stbInfo, SSuperTable* s
printf("failed to read json, tag type not found");
goto PARSE_OVER;
}
strncpy(columnCase.dataType, dataType->valuestring, MAX_TB_NAME_SIZE);
tstrncpy(columnCase.dataType, dataType->valuestring, MAX_TB_NAME_SIZE);
cJSON* dataLen = cJSON_GetObjectItem(tag, "len");
if (dataLen && dataLen->type == cJSON_Number) {
......@@ -2521,7 +2528,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(cJSON* stbInfo, SSuperTable* s
}
for (int n = 0; n < count; ++n) {
strncpy(superTbls->tags[index].dataType, columnCase.dataType, MAX_TB_NAME_SIZE);
tstrncpy(superTbls->tags[index].dataType, columnCase.dataType, MAX_TB_NAME_SIZE);
superTbls->tags[index].dataLen = columnCase.dataLen;
index++;
}
......@@ -2542,14 +2549,14 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON* cfgdir = cJSON_GetObjectItem(root, "cfgdir");
if (cfgdir && cfgdir->type == cJSON_String && cfgdir->valuestring != NULL) {
strncpy(g_Dbs.cfgDir, cfgdir->valuestring, MAX_FILE_NAME_LEN);
tstrncpy(g_Dbs.cfgDir, cfgdir->valuestring, MAX_FILE_NAME_LEN);
}
cJSON* host = cJSON_GetObjectItem(root, "host");
if (host && host->type == cJSON_String && host->valuestring != NULL) {
strncpy(g_Dbs.host, host->valuestring, MAX_DB_NAME_SIZE);
tstrncpy(g_Dbs.host, host->valuestring, MAX_DB_NAME_SIZE);
} else if (!host) {
strncpy(g_Dbs.host, "127.0.0.1", MAX_DB_NAME_SIZE);
tstrncpy(g_Dbs.host, "127.0.0.1", MAX_DB_NAME_SIZE);
} else {
printf("failed to read json, host not found\n");
goto PARSE_OVER;
......@@ -2564,23 +2571,23 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON* user = cJSON_GetObjectItem(root, "user");
if (user && user->type == cJSON_String && user->valuestring != NULL) {
strncpy(g_Dbs.user, user->valuestring, MAX_DB_NAME_SIZE);
tstrncpy(g_Dbs.user, user->valuestring, MAX_DB_NAME_SIZE);
} else if (!user) {
strncpy(g_Dbs.user, "root", MAX_DB_NAME_SIZE);
tstrncpy(g_Dbs.user, "root", MAX_DB_NAME_SIZE);
}
cJSON* password = cJSON_GetObjectItem(root, "password");
if (password && password->type == cJSON_String && password->valuestring != NULL) {
strncpy(g_Dbs.password, password->valuestring, MAX_DB_NAME_SIZE);
tstrncpy(g_Dbs.password, password->valuestring, MAX_DB_NAME_SIZE);
} else if (!password) {
strncpy(g_Dbs.password, "taosdata", MAX_DB_NAME_SIZE);
tstrncpy(g_Dbs.password, "taosdata", MAX_DB_NAME_SIZE);
}
cJSON* resultfile = cJSON_GetObjectItem(root, "result_file");
if (resultfile && resultfile->type == cJSON_String && resultfile->valuestring != NULL) {
strncpy(g_Dbs.resultFile, resultfile->valuestring, MAX_FILE_NAME_LEN);
tstrncpy(g_Dbs.resultFile, resultfile->valuestring, MAX_FILE_NAME_LEN);
} else if (!resultfile) {
strncpy(g_Dbs.resultFile, "./insert_res.txt", MAX_FILE_NAME_LEN);
tstrncpy(g_Dbs.resultFile, "./insert_res.txt", MAX_FILE_NAME_LEN);
}
cJSON* threads = cJSON_GetObjectItem(root, "thread_count");
......@@ -2648,7 +2655,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
printf("failed to read json, db name not found");
goto PARSE_OVER;
}
strncpy(g_Dbs.db[i].dbName, dbName->valuestring, MAX_DB_NAME_SIZE);
tstrncpy(g_Dbs.db[i].dbName, dbName->valuestring, MAX_DB_NAME_SIZE);
cJSON *drop = cJSON_GetObjectItem(dbinfo, "drop");
if (drop && drop->type == cJSON_String && drop->valuestring != NULL) {
......@@ -2666,9 +2673,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON *precision = cJSON_GetObjectItem(dbinfo, "precision");
if (precision && precision->type == cJSON_String && precision->valuestring != NULL) {
strncpy(g_Dbs.db[i].dbCfg.precision, precision->valuestring, MAX_DB_NAME_SIZE);
tstrncpy(g_Dbs.db[i].dbCfg.precision, precision->valuestring, MAX_DB_NAME_SIZE);
} else if (!precision) {
//strncpy(g_Dbs.db[i].dbCfg.precision, "ms", MAX_DB_NAME_SIZE);
//tstrncpy(g_Dbs.db[i].dbCfg.precision, "ms", MAX_DB_NAME_SIZE);
memset(g_Dbs.db[i].dbCfg.precision, 0, MAX_DB_NAME_SIZE);
} else {
printf("failed to read json, precision not found");
......@@ -2829,14 +2836,14 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
printf("failed to read json, stb name not found");
goto PARSE_OVER;
}
strncpy(g_Dbs.db[i].superTbls[j].sTblName, stbName->valuestring, MAX_TB_NAME_SIZE);
tstrncpy(g_Dbs.db[i].superTbls[j].sTblName, stbName->valuestring, MAX_TB_NAME_SIZE);
cJSON *prefix = cJSON_GetObjectItem(stbInfo, "childtable_prefix");
if (!prefix || prefix->type != cJSON_String || prefix->valuestring == NULL) {
printf("failed to read json, childtable_prefix not found");
goto PARSE_OVER;
}
strncpy(g_Dbs.db[i].superTbls[j].childTblPrefix, prefix->valuestring, MAX_DB_NAME_SIZE);
tstrncpy(g_Dbs.db[i].superTbls[j].childTblPrefix, prefix->valuestring, MAX_DB_NAME_SIZE);
cJSON *autoCreateTbl = cJSON_GetObjectItem(stbInfo, "auto_create_table"); // yes, no, null
if (autoCreateTbl && autoCreateTbl->type == cJSON_String && autoCreateTbl->valuestring != NULL) {
......@@ -2889,9 +2896,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON *dataSource = cJSON_GetObjectItem(stbInfo, "data_source");
if (dataSource && dataSource->type == cJSON_String && dataSource->valuestring != NULL) {
strncpy(g_Dbs.db[i].superTbls[j].dataSource, dataSource->valuestring, MAX_DB_NAME_SIZE);
tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, dataSource->valuestring, MAX_DB_NAME_SIZE);
} else if (!dataSource) {
strncpy(g_Dbs.db[i].superTbls[j].dataSource, "rand", MAX_DB_NAME_SIZE);
tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, "rand", MAX_DB_NAME_SIZE);
} else {
printf("failed to read json, data_source not found");
goto PARSE_OVER;
......@@ -2899,7 +2906,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON *insertMode = cJSON_GetObjectItem(stbInfo, "insert_mode"); // taosc , restful
if (insertMode && insertMode->type == cJSON_String && insertMode->valuestring != NULL) {
strncpy(g_Dbs.db[i].superTbls[j].insertMode, insertMode->valuestring, MAX_DB_NAME_SIZE);
tstrncpy(g_Dbs.db[i].superTbls[j].insertMode, insertMode->valuestring, MAX_DB_NAME_SIZE);
#ifndef TD_LOWA_CURL
if (0 == strncasecmp(g_Dbs.db[i].superTbls[j].insertMode, "restful", 7)) {
printf("There no libcurl, so no support resetful test! please use taosc mode.\n");
......@@ -2907,7 +2914,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
}
#endif
} else if (!insertMode) {
strncpy(g_Dbs.db[i].superTbls[j].insertMode, "taosc", MAX_DB_NAME_SIZE);
tstrncpy(g_Dbs.db[i].superTbls[j].insertMode, "taosc", MAX_DB_NAME_SIZE);
} else {
printf("failed to read json, insert_mode not found");
goto PARSE_OVER;
......@@ -2915,9 +2922,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON *ts = cJSON_GetObjectItem(stbInfo, "start_timestamp");
if (ts && ts->type == cJSON_String && ts->valuestring != NULL) {
strncpy(g_Dbs.db[i].superTbls[j].startTimestamp, ts->valuestring, MAX_DB_NAME_SIZE);
tstrncpy(g_Dbs.db[i].superTbls[j].startTimestamp, ts->valuestring, MAX_DB_NAME_SIZE);
} else if (!ts) {
strncpy(g_Dbs.db[i].superTbls[j].startTimestamp, "now", MAX_DB_NAME_SIZE);
tstrncpy(g_Dbs.db[i].superTbls[j].startTimestamp, "now", MAX_DB_NAME_SIZE);
} else {
printf("failed to read json, start_timestamp not found");
goto PARSE_OVER;
......@@ -2948,9 +2955,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON *sampleFormat = cJSON_GetObjectItem(stbInfo, "sample_format");
if (sampleFormat && sampleFormat->type == cJSON_String && sampleFormat->valuestring != NULL) {
strncpy(g_Dbs.db[i].superTbls[j].sampleFormat, sampleFormat->valuestring, MAX_DB_NAME_SIZE);
tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat, sampleFormat->valuestring, MAX_DB_NAME_SIZE);
} else if (!sampleFormat) {
strncpy(g_Dbs.db[i].superTbls[j].sampleFormat, "csv", MAX_DB_NAME_SIZE);
tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat, "csv", MAX_DB_NAME_SIZE);
} else {
printf("failed to read json, sample_format not found");
goto PARSE_OVER;
......@@ -2958,7 +2965,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON *sampleFile = cJSON_GetObjectItem(stbInfo, "sample_file");
if (sampleFile && sampleFile->type == cJSON_String && sampleFile->valuestring != NULL) {
strncpy(g_Dbs.db[i].superTbls[j].sampleFile, sampleFile->valuestring, MAX_FILE_NAME_LEN);
tstrncpy(g_Dbs.db[i].superTbls[j].sampleFile, sampleFile->valuestring, MAX_FILE_NAME_LEN);
} else if (!sampleFile) {
memset(g_Dbs.db[i].superTbls[j].sampleFile, 0, MAX_FILE_NAME_LEN);
} else {
......@@ -2968,7 +2975,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON *tagsFile = cJSON_GetObjectItem(stbInfo, "tags_file");
if (tagsFile && tagsFile->type == cJSON_String && tagsFile->valuestring != NULL) {
strncpy(g_Dbs.db[i].superTbls[j].tagsFile, tagsFile->valuestring, MAX_FILE_NAME_LEN);
tstrncpy(g_Dbs.db[i].superTbls[j].tagsFile, tagsFile->valuestring, MAX_FILE_NAME_LEN);
if (0 == g_Dbs.db[i].superTbls[j].tagsFile[0]) {
g_Dbs.db[i].superTbls[j].tagSource = 0;
} else {
......@@ -3100,14 +3107,14 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* cfgdir = cJSON_GetObjectItem(root, "cfgdir");
if (cfgdir && cfgdir->type == cJSON_String && cfgdir->valuestring != NULL) {
strncpy(g_queryInfo.cfgDir, cfgdir->valuestring, MAX_FILE_NAME_LEN);
tstrncpy(g_queryInfo.cfgDir, cfgdir->valuestring, MAX_FILE_NAME_LEN);
}
cJSON* host = cJSON_GetObjectItem(root, "host");
if (host && host->type == cJSON_String && host->valuestring != NULL) {
strncpy(g_queryInfo.host, host->valuestring, MAX_DB_NAME_SIZE);
tstrncpy(g_queryInfo.host, host->valuestring, MAX_DB_NAME_SIZE);
} else if (!host) {
strncpy(g_queryInfo.host, "127.0.0.1", MAX_DB_NAME_SIZE);
tstrncpy(g_queryInfo.host, "127.0.0.1", MAX_DB_NAME_SIZE);
} else {
printf("failed to read json, host not found\n");
goto PARSE_OVER;
......@@ -3122,16 +3129,16 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* user = cJSON_GetObjectItem(root, "user");
if (user && user->type == cJSON_String && user->valuestring != NULL) {
strncpy(g_queryInfo.user, user->valuestring, MAX_DB_NAME_SIZE);
tstrncpy(g_queryInfo.user, user->valuestring, MAX_DB_NAME_SIZE);
} else if (!user) {
strncpy(g_queryInfo.user, "root", MAX_DB_NAME_SIZE); ;
tstrncpy(g_queryInfo.user, "root", MAX_DB_NAME_SIZE); ;
}
cJSON* password = cJSON_GetObjectItem(root, "password");
if (password && password->type == cJSON_String && password->valuestring != NULL) {
strncpy(g_queryInfo.password, password->valuestring, MAX_DB_NAME_SIZE);
tstrncpy(g_queryInfo.password, password->valuestring, MAX_DB_NAME_SIZE);
} else if (!password) {
strncpy(g_queryInfo.password, "taosdata", MAX_DB_NAME_SIZE);;
tstrncpy(g_queryInfo.password, "taosdata", MAX_DB_NAME_SIZE);;
}
cJSON *answerPrompt = cJSON_GetObjectItem(root, "confirm_parameter_prompt"); // yes, no,
......@@ -3152,7 +3159,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* dbs = cJSON_GetObjectItem(root, "databases");
if (dbs && dbs->type == cJSON_String && dbs->valuestring != NULL) {
strncpy(g_queryInfo.dbName, dbs->valuestring, MAX_DB_NAME_SIZE);
tstrncpy(g_queryInfo.dbName, dbs->valuestring, MAX_DB_NAME_SIZE);
} else if (!dbs) {
printf("failed to read json, databases not found\n");
goto PARSE_OVER;
......@@ -3160,9 +3167,9 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* queryMode = cJSON_GetObjectItem(root, "query_mode");
if (queryMode && queryMode->type == cJSON_String && queryMode->valuestring != NULL) {
strncpy(g_queryInfo.queryMode, queryMode->valuestring, MAX_TB_NAME_SIZE);
tstrncpy(g_queryInfo.queryMode, queryMode->valuestring, MAX_TB_NAME_SIZE);
} else if (!queryMode) {
strncpy(g_queryInfo.queryMode, "taosc", MAX_TB_NAME_SIZE);
tstrncpy(g_queryInfo.queryMode, "taosc", MAX_TB_NAME_SIZE);
} else {
printf("failed to read json, query_mode not found\n");
goto PARSE_OVER;
......@@ -3266,11 +3273,11 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
printf("failed to read json, sql not found\n");
goto PARSE_OVER;
}
strncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring, MAX_QUERY_SQL_LENGTH);
tstrncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring, MAX_QUERY_SQL_LENGTH);
cJSON *result = cJSON_GetObjectItem(sql, "result");
if (NULL != result && result->type == cJSON_String && result->valuestring != NULL) {
strncpy(g_queryInfo.superQueryInfo.result[j], result->valuestring, MAX_FILE_NAME_LEN);
tstrncpy(g_queryInfo.superQueryInfo.result[j], result->valuestring, MAX_FILE_NAME_LEN);
} else if (NULL == result) {
memset(g_queryInfo.superQueryInfo.result[j], 0, MAX_FILE_NAME_LEN);
} else {
......@@ -3314,7 +3321,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* stblname = cJSON_GetObjectItem(subQuery, "stblname");
if (stblname && stblname->type == cJSON_String && stblname->valuestring != NULL) {
strncpy(g_queryInfo.subQueryInfo.sTblName, stblname->valuestring, MAX_TB_NAME_SIZE);
tstrncpy(g_queryInfo.subQueryInfo.sTblName, stblname->valuestring, MAX_TB_NAME_SIZE);
} else {
printf("failed to read json, super table name not found\n");
goto PARSE_OVER;
......@@ -3395,11 +3402,11 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
printf("failed to read json, sql not found\n");
goto PARSE_OVER;
}
strncpy(g_queryInfo.subQueryInfo.sql[j], sqlStr->valuestring, MAX_QUERY_SQL_LENGTH);
tstrncpy(g_queryInfo.subQueryInfo.sql[j], sqlStr->valuestring, MAX_QUERY_SQL_LENGTH);
cJSON *result = cJSON_GetObjectItem(sql, "result");
if (result != NULL && result->type == cJSON_String && result->valuestring != NULL){
strncpy(g_queryInfo.subQueryInfo.result[j], result->valuestring, MAX_FILE_NAME_LEN);
tstrncpy(g_queryInfo.subQueryInfo.result[j], result->valuestring, MAX_FILE_NAME_LEN);
} else if (NULL == result) {
memset(g_queryInfo.subQueryInfo.result[j], 0, MAX_FILE_NAME_LEN);
} else {
......@@ -4240,7 +4247,7 @@ void startMultiThreadInsertData(int threads, char* db_name, char* precision, SSu
int64_t totalDelay = 0;
int64_t maxDelay = 0;
int64_t minDelay = INT16_MAX;
int64_t cntDelay = 0;
int64_t cntDelay = 1;
double avgDelay = 0;
for (int i = 0; i < threads; i++) {
......@@ -4262,7 +4269,9 @@ void startMultiThreadInsertData(int threads, char* db_name, char* precision, SSu
}
#endif
}
cntDelay -= 1;
assert(cntDelay != 0);
avgDelay = (double)totalDelay / cntDelay;
double end = getCurrentTime();
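Coverity also flags the average-delay computation as a potential division by zero. The hunk initializes `cntDelay` to 1, subtracts that 1 back after the per-thread accumulation loop, and asserts the counter is non-zero before dividing. A sketch of the resulting guard (the accumulation loop is abridged and the array argument is a stand-in for the per-thread statistics):

```c
#include <assert.h>
#include <stdint.h>

/* Sketch only: delays[] stands in for the per-thread insert delays
 * gathered in startMultiThreadInsertData. */
static double average_delay_sketch(const int64_t *delays, int n) {
    int64_t totalDelay = 0;
    int64_t cntDelay = 1;      /* matches the hunk: start one above zero */

    for (int i = 0; i < n; i++) {
        totalDelay += delays[i];
        cntDelay++;
    }

    cntDelay -= 1;             /* drop the initial 1 again */
    assert(cntDelay != 0);     /* fail loudly instead of dividing by zero */
    return (double)totalDelay / (double)cntDelay;
}
```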
......@@ -4326,6 +4335,7 @@ void *readTable(void *sarg) {
fprintf(stderr, "Failed to query:%s\n", taos_errstr(pSql));
taos_free_result(pSql);
taos_close(taos);
fclose(fp);
return NULL;
}
......@@ -4401,6 +4411,7 @@ void *readMetric(void *sarg) {
fprintf(stderr, "Failed to query:%s\n", taos_errstr(pSql));
taos_free_result(pSql);
taos_close(taos);
fclose(fp);
return NULL;
}
int count = 0;
......@@ -4521,7 +4532,7 @@ void *superQueryProcess(void *sarg) {
int64_t st = 0;
int64_t et = 0;
while (1) {
if (g_queryInfo.superQueryInfo.rate && (et - st) < g_queryInfo.superQueryInfo.rate*1000) {
if (g_queryInfo.superQueryInfo.rate && (et - st) < (int64_t)g_queryInfo.superQueryInfo.rate*1000) {
taosMsleep(g_queryInfo.superQueryInfo.rate*1000 - (et - st)); // ms
//printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_id, winfo->end_table_id);
}
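The query-loop hunks cast the configured rate to `int64_t` before multiplying by 1000, so the product is computed in 64 bits and compared against the 64-bit elapsed time rather than being truncated or overflowing in narrower integer arithmetic first. A small sketch of the widening (types and values are illustrative, not the actual SQueryMetaInfo fields):

```c
#include <stdint.h>
#include <stdio.h>

int main(void) {
    int rate = 3000000;            /* illustrative: rate*1000 would not fit in 32-bit int */
    int64_t elapsed_ms = 1500;     /* stand-in for (et - st) */

    /* Widen before multiplying, as the hunk does, so both the product
     * and the comparison are evaluated in 64 bits. */
    int64_t interval = (int64_t)rate * 1000;

    if (rate && elapsed_ms < interval) {
        printf("would sleep for %lld ms\n", (long long)(interval - elapsed_ms));
    }
    return 0;
}
```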
......@@ -4569,7 +4580,7 @@ void replaceSubTblName(char* inSql, char* outSql, int tblIndex) {
return;
}
strncpy(outSql, inSql, pos - inSql);
tstrncpy(outSql, inSql, pos - inSql);
//printf("1: %s\n", outSql);
strcat(outSql, subTblName);
//printf("2: %s\n", outSql);
......@@ -4581,7 +4592,7 @@ void *subQueryProcess(void *sarg) {
char sqlstr[1024];
threadInfo *winfo = (threadInfo *)sarg;
int64_t st = 0;
int64_t et = g_queryInfo.subQueryInfo.rate*1000;
int64_t et = (int64_t)g_queryInfo.subQueryInfo.rate*1000;
while (1) {
if (g_queryInfo.subQueryInfo.rate && (et - st) < g_queryInfo.subQueryInfo.rate*1000) {
taosMsleep(g_queryInfo.subQueryInfo.rate*1000 - (et - st)); // ms
......@@ -4974,10 +4985,10 @@ void initOfInsertMeta() {
memset(&g_Dbs, 0, sizeof(SDbs));
// set default values
strncpy(g_Dbs.host, "127.0.0.1", MAX_DB_NAME_SIZE);
tstrncpy(g_Dbs.host, "127.0.0.1", MAX_DB_NAME_SIZE);
g_Dbs.port = 6030;
strncpy(g_Dbs.user, TSDB_DEFAULT_USER, MAX_DB_NAME_SIZE);
strncpy(g_Dbs.password, TSDB_DEFAULT_PASS, MAX_DB_NAME_SIZE);
tstrncpy(g_Dbs.user, TSDB_DEFAULT_USER, MAX_DB_NAME_SIZE);
tstrncpy(g_Dbs.password, TSDB_DEFAULT_PASS, MAX_DB_NAME_SIZE);
g_Dbs.threadCount = 2;
g_Dbs.use_metric = true;
}
......@@ -4986,17 +4997,17 @@ void initOfQueryMeta() {
memset(&g_queryInfo, 0, sizeof(SQueryMetaInfo));
// set default values
strncpy(g_queryInfo.host, "127.0.0.1", MAX_DB_NAME_SIZE);
tstrncpy(g_queryInfo.host, "127.0.0.1", MAX_DB_NAME_SIZE);
g_queryInfo.port = 6030;
strncpy(g_queryInfo.user, TSDB_DEFAULT_USER, MAX_DB_NAME_SIZE);
strncpy(g_queryInfo.password, TSDB_DEFAULT_PASS, MAX_DB_NAME_SIZE);
tstrncpy(g_queryInfo.user, TSDB_DEFAULT_USER, MAX_DB_NAME_SIZE);
tstrncpy(g_queryInfo.password, TSDB_DEFAULT_PASS, MAX_DB_NAME_SIZE);
}
void setParaFromArg(){
if (g_args.host) {
strcpy(g_Dbs.host, g_args.host);
} else {
strncpy(g_Dbs.host, "127.0.0.1", MAX_DB_NAME_SIZE);
tstrncpy(g_Dbs.host, "127.0.0.1", MAX_DB_NAME_SIZE);
}
if (g_args.user) {
......@@ -5014,18 +5025,18 @@ void setParaFromArg(){
g_Dbs.dbCount = 1;
g_Dbs.db[0].drop = 1;
strncpy(g_Dbs.db[0].dbName, g_args.database, MAX_DB_NAME_SIZE);
tstrncpy(g_Dbs.db[0].dbName, g_args.database, MAX_DB_NAME_SIZE);
g_Dbs.db[0].dbCfg.replica = g_args.replica;
strncpy(g_Dbs.db[0].dbCfg.precision, "ms", MAX_DB_NAME_SIZE);
tstrncpy(g_Dbs.db[0].dbCfg.precision, "ms", MAX_DB_NAME_SIZE);
strncpy(g_Dbs.resultFile, g_args.output_file, MAX_FILE_NAME_LEN);
tstrncpy(g_Dbs.resultFile, g_args.output_file, MAX_FILE_NAME_LEN);
g_Dbs.use_metric = g_args.use_metric;
g_Dbs.insert_only = g_args.insert_only;
g_Dbs.db[0].superTblCount = 1;
strncpy(g_Dbs.db[0].superTbls[0].sTblName, "meters", MAX_TB_NAME_SIZE);
tstrncpy(g_Dbs.db[0].superTbls[0].sTblName, "meters", MAX_TB_NAME_SIZE);
g_Dbs.db[0].superTbls[0].childTblCount = g_args.num_of_tables;
g_Dbs.threadCount = g_args.num_of_threads;
g_Dbs.threadCountByCreateTbl = 1;
......@@ -5037,10 +5048,10 @@ void setParaFromArg(){
g_Dbs.db[0].superTbls[0].insertRate = 0;
g_Dbs.db[0].superTbls[0].disorderRange = g_args.disorderRange;
g_Dbs.db[0].superTbls[0].disorderRatio = g_args.disorderRatio;
strncpy(g_Dbs.db[0].superTbls[0].childTblPrefix, g_args.tb_prefix, MAX_TB_NAME_SIZE);
strncpy(g_Dbs.db[0].superTbls[0].dataSource, "rand", MAX_TB_NAME_SIZE);
strncpy(g_Dbs.db[0].superTbls[0].insertMode, "taosc", MAX_TB_NAME_SIZE);
strncpy(g_Dbs.db[0].superTbls[0].startTimestamp, "2017-07-14 10:40:00.000", MAX_TB_NAME_SIZE);
tstrncpy(g_Dbs.db[0].superTbls[0].childTblPrefix, g_args.tb_prefix, MAX_TB_NAME_SIZE);
tstrncpy(g_Dbs.db[0].superTbls[0].dataSource, "rand", MAX_TB_NAME_SIZE);
tstrncpy(g_Dbs.db[0].superTbls[0].insertMode, "taosc", MAX_TB_NAME_SIZE);
tstrncpy(g_Dbs.db[0].superTbls[0].startTimestamp, "2017-07-14 10:40:00.000", MAX_TB_NAME_SIZE);
g_Dbs.db[0].superTbls[0].timeStampStep = 10;
// g_args.num_of_RPR;
......@@ -5064,7 +5075,7 @@ void setParaFromArg(){
break;
}
strncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, data_type[i], MAX_TB_NAME_SIZE);
tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, data_type[i], MAX_TB_NAME_SIZE);
g_Dbs.db[0].superTbls[0].columns[i].dataLen = g_args.len_of_binary;
g_Dbs.db[0].superTbls[0].columnCount++;
}
......@@ -5073,17 +5084,17 @@ void setParaFromArg(){
g_Dbs.db[0].superTbls[0].columnCount = g_args.num_of_CPR;
} else {
for (int i = g_Dbs.db[0].superTbls[0].columnCount; i < g_args.num_of_CPR; i++) {
strncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, "INT", MAX_TB_NAME_SIZE);
tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, "INT", MAX_TB_NAME_SIZE);
g_Dbs.db[0].superTbls[0].columns[i].dataLen = 0;
g_Dbs.db[0].superTbls[0].columnCount++;
}
}
if (g_Dbs.use_metric) {
strncpy(g_Dbs.db[0].superTbls[0].tags[0].dataType, "INT", MAX_TB_NAME_SIZE);
tstrncpy(g_Dbs.db[0].superTbls[0].tags[0].dataType, "INT", MAX_TB_NAME_SIZE);
g_Dbs.db[0].superTbls[0].tags[0].dataLen = 0;
strncpy(g_Dbs.db[0].superTbls[0].tags[1].dataType, "BINARY", MAX_TB_NAME_SIZE);
tstrncpy(g_Dbs.db[0].superTbls[0].tags[1].dataType, "BINARY", MAX_TB_NAME_SIZE);
g_Dbs.db[0].superTbls[0].tags[1].dataLen = g_args.len_of_binary;
g_Dbs.db[0].superTbls[0].tagCount = 2;
} else {
......
......@@ -793,6 +793,7 @@ int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct argu
*totalNumOfThread = numOfThread;
free(tblBuf);
return 0;
}
......@@ -1553,6 +1554,7 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
(void)remove(tmpBuf);
}
free(tblBuf);
return 0;
}
......