Commit 430e060a authored by Bomin Zhang

implement format check for log

and fix all format issues
Parent 4ef4f60b
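
The "format check" referred to in the commit message is the standard GCC/Clang `format(printf, ...)` function attribute, which this change adds to the `taosPrintLog`/`taosPrintLongString` declarations further down (with positions 3 and 4, since the format string is their third parameter). Below is a minimal sketch of the mechanism; the `demoLog` function and its `stderr` sink are illustrative only, not part of the TDengine sources.

```c
#include <stdarg.h>
#include <stdio.h>

/* Declare a printf-style logger: argument 1 is the format string and the
 * variadic arguments start at position 2. With -Wformat enabled, GCC and
 * Clang will warn whenever the arguments do not match the format string. */
void demoLog(const char *format, ...)
#ifdef __GNUC__
    __attribute__((format(printf, 1, 2)))
#endif
    ;

void demoLog(const char *format, ...) {
  va_list ap;
  va_start(ap, format);
  vfprintf(stderr, format, ap);  /* forward to stderr for this sketch */
  va_end(ap);
}

int main(void) {
  long long rows = 42;
  demoLog("numOfRows:%lld\n", rows);      /* OK: types match */
  /* demoLog("numOfRows:%d\n", rows); */  /* would trigger a -Wformat warning */
  return 0;
}
```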
......@@ -583,7 +583,7 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_subscribeImp(JNI
}
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp(JNIEnv *env, jobject jobj, jlong sub) {
jniTrace("jobj:%p, in TSDBJNIConnector_consumeImp, sub:%" PRId64, jobj, sub);
jniTrace("jobj:%p, in TSDBJNIConnector_consumeImp, sub:%lld", jobj, sub);
jniGetGlobalMethod(env);
TAOS_SUB *tsub = (TAOS_SUB *)sub;
......
......@@ -272,7 +272,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcIpSet *pIpSet) {
if (pRes->code != TSDB_CODE_TSC_QUERY_CANCELLED) {
pRes->code = (rpcMsg->code != TSDB_CODE_SUCCESS) ? rpcMsg->code : TSDB_CODE_RPC_NETWORK_UNAVAIL;
} else {
tscTrace("%p query is cancelled, code:%d", pSql, tstrerror(pRes->code));
tscTrace("%p query is cancelled, code:%s", pSql, tstrerror(pRes->code));
}
if (pRes->code == TSDB_CODE_SUCCESS) {
......@@ -2180,7 +2180,7 @@ int tscProcessRetrieveRspFromNode(SSqlObj *pSql) {
}
pRes->row = 0;
tscTrace("%p numOfRows:%d, offset:%d, complete:%d", pSql, pRes->numOfRows, pRes->offset, pRes->completed);
tscTrace("%p numOfRows:%" PRId64 ", offset:%" PRId64 ", complete:%d", pSql, pRes->numOfRows, pRes->offset, pRes->completed);
return 0;
}
......
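
The hunk above, like most in this commit, swaps `%d` or `%lld` for the `<inttypes.h>` width macros when the argument is an `int64_t` field such as `numOfRows` or `offset`, so the format and the argument agree on both 32- and 64-bit builds. A small, self-contained sketch of the pattern (the variable names are made up for illustration):

```c
#include <inttypes.h>
#include <stdio.h>

int main(void) {
  int64_t numOfRows = 1234567890123LL;
  int64_t offset    = numOfRows / 2;

  /* PRId64 expands to the correct length modifier for int64_t on the
   * current platform ("ld" on LP64 Linux, "lld" on 32-bit targets), so
   * the format string and the argument always agree. */
  printf("numOfRows:%" PRId64 ", offset:%" PRId64 "\n", numOfRows, offset);
  return 0;
}
```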
......@@ -291,7 +291,7 @@ static int tscLoadSubscriptionProgress(SSub* pSub) {
fclose(fp);
taosArraySort(progress, tscCompareSubscriptionProgress);
tscTrace("subscription progress loaded, %z tables: %s", taosArrayGetSize(progress), pSub->topic);
tscTrace("subscription progress loaded, %zu tables: %s", taosArrayGetSize(progress), pSub->topic);
return 1;
}
......
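
The hunk above replaces the invalid `%z` specifier with `%zu`, the standard conversion for `size_t` values such as the `taosArrayGetSize()` result. A brief illustration, with a hypothetical count standing in for the real array size:

```c
#include <stddef.h>
#include <stdio.h>

int main(void) {
  size_t numOfTables = 128;  /* stands in for a taosArrayGetSize() result */

  /* "%z" alone is not a valid conversion: the z length modifier needs a
   * conversion letter, so size_t values are printed with "%zu". */
  printf("subscription progress loaded, %zu tables\n", numOfTables);
  return 0;
}
```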
......@@ -354,7 +354,7 @@ static int32_t tscLaunchSecondPhaseSubqueries(SSqlObj* pSql) {
}
size_t numOfCols = taosArrayGetSize(pNewQueryInfo->colList);
tscTrace("%p subquery:%p tableIndex:%d, vgroupIndex:%d, type:%d, exprInfo:%z, colList:%z, fieldsInfo:%d, name:%s",
tscTrace("%p subquery:%p tableIndex:%d, vgroupIndex:%d, type:%d, exprInfo:%zu, colList:%zu, fieldsInfo:%d, name:%s",
pSql, pNew, 0, pTableMetaInfo->vgroupIndex, pNewQueryInfo->type, taosArrayGetSize(pNewQueryInfo->exprList),
numOfCols, pNewQueryInfo->fieldsInfo.numOfOutput, pTableMetaInfo->name);
}
......@@ -551,7 +551,7 @@ static void issueTSCompQuery(SSqlObj* pSql, SJoinSupporter* pSupporter, SSqlObj*
tscTrace(
"%p subquery:%p tableIndex:%d, vgroupIndex:%d, numOfVgroups:%d, type:%d, ts_comp query to retrieve timestamps, "
"numOfExpr:%z, colList:%z, numOfOutputFields:%d, name:%s",
"numOfExpr:%zu, colList:%zu, numOfOutputFields:%d, name:%s",
pParent, pSql, 0, pTableMetaInfo->vgroupIndex, pTableMetaInfo->vgroupList->numOfVgroups, pQueryInfo->type,
tscSqlExprNumOfExprs(pQueryInfo), numOfCols, pQueryInfo->fieldsInfo.numOfOutput, pTableMetaInfo->name);
......@@ -809,7 +809,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
pTableMetaInfo->vgroupIndex += 1;
assert(pTableMetaInfo->vgroupIndex < totalVgroups);
tscTrace("%p results from vgroup index:%d completed, try next vgroup:%d. total vgroups:%d. current numOfRes:%d",
tscTrace("%p results from vgroup index:%d completed, try next vgroup:%d. total vgroups:%d. current numOfRes:%" PRId64,
pSql, pTableMetaInfo->vgroupIndex - 1, pTableMetaInfo->vgroupIndex, totalVgroups,
pRes->numOfClauseTotal);
......@@ -1245,7 +1245,7 @@ int32_t tscLaunchJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
tscTrace(
"%p subquery:%p tableIndex:%d, vgroupIndex:%d, type:%d, transfer to tid_tag query to retrieve (tableId, tags), "
"exprInfo:%z, colList:%z, fieldsInfo:%d, tagIndex:%d, name:%s",
"exprInfo:%zu, colList:%zu, fieldsInfo:%d, tagIndex:%d, name:%s",
pSql, pNew, tableIndex, pTableMetaInfo->vgroupIndex, pNewQueryInfo->type, tscSqlExprNumOfExprs(pNewQueryInfo),
numOfCols, pNewQueryInfo->fieldsInfo.numOfOutput, index.columnIndex, pNewQueryInfo->pTableMetaInfo[0]->name);
} else {
......@@ -1280,7 +1280,7 @@ int32_t tscLaunchJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
tscTrace(
"%p subquery:%p tableIndex:%d, vgroupIndex:%d, type:%u, transfer to ts_comp query to retrieve timestamps, "
"exprInfo:%z, colList:%z, fieldsInfo:%d, name:%s",
"exprInfo:%zu, colList:%zu, fieldsInfo:%d, name:%s",
pSql, pNew, tableIndex, pTableMetaInfo->vgroupIndex, pNewQueryInfo->type, tscSqlExprNumOfExprs(pNewQueryInfo),
numOfCols, pNewQueryInfo->fieldsInfo.numOfOutput, pNewQueryInfo->pTableMetaInfo[0]->name);
}
......@@ -1647,7 +1647,7 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p
// all sub-queries are returned, start to local merge process
pDesc->pColumnModel->capacity = trsupport->pExtMemBuffer[idx]->numOfElemsPerPage;
tscTrace("%p retrieve from %d vnodes completed.final NumOfRows:%d,start to build loser tree", pPObj,
tscTrace("%p retrieve from %d vnodes completed.final NumOfRows:%" PRId64 ",start to build loser tree", pPObj,
pState->numOfTotal, pState->numOfRetrievedRows);
SQueryInfo *pPQueryInfo = tscGetQueryInfoDetail(&pPObj->cmd, 0);
......@@ -1869,7 +1869,7 @@ static void multiVnodeInsertMerge(void* param, TAOS_RES* tres, int numOfRows) {
return;
}
tscTrace("%p Async insertion completed, total inserted:%d", pParentObj, pParentObj->res.numOfRows);
tscTrace("%p Async insertion completed, total inserted:%" PRId64, pParentObj, pParentObj->res.numOfRows);
tfree(pState);
tfree(pSupporter);
......@@ -2049,7 +2049,7 @@ static void transferNcharData(SSqlObj *pSql, int32_t columnIndex, TAOS_FIELD *pF
pRes->tsrow[columnIndex] = pRes->buffer[columnIndex];
pRes->length[columnIndex] = length;
} else {
tscError("%p charset:%s to %s. val:%ls convert failed.", pSql, DEFAULT_UNICODE_ENCODEC, tsCharset, pRes->tsrow[columnIndex]);
tscError("%p charset:%s to %s. val:%s convert failed.", pSql, DEFAULT_UNICODE_ENCODEC, tsCharset, (char*)pRes->tsrow[columnIndex]);
pRes->tsrow[columnIndex] = NULL;
pRes->length[columnIndex] = 0;
}
......
......@@ -1835,7 +1835,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
assert(pFinalInfo->vgroupList != NULL);
}
if (cmd == TSDB_SQL_SELECT) {
size_t size = taosArrayGetSize(pNewQueryInfo->colList);
......
......@@ -1061,7 +1061,7 @@ static int32_t mnodeAddSuperTableColumn(SMnodeMsg *pMsg, SSchema schema[], int32
SDbObj *pDb = pMsg->pDb;
SSuperTableObj *pStable = (SSuperTableObj *)pMsg->pTable;
if (ncols <= 0) {
mError("app:%p:%p, stable:%s, add column, ncols:%d <= 0", pMsg->rpcMsg.ahandle, pMsg, pStable->info.tableId);
mError("app:%p:%p, stable:%s, add column, ncols:%d <= 0", pMsg->rpcMsg.ahandle, pMsg, pStable->info.tableId, ncols);
return TSDB_CODE_MND_APP_ERROR;
}
......@@ -1689,7 +1689,7 @@ static int32_t mnodeProcessCreateChildTableMsg(SMnodeMsg *pMsg) {
}
if (pMsg->pTable == NULL) {
mError("app:%p:%p, table:%s, object not found, retry:%d reason:%s", pMsg->rpcMsg.ahandle, pMsg, pCreate->tableId,
mError("app:%p:%p, table:%s, object not found, retry:%d reason:%s", pMsg->rpcMsg.ahandle, pMsg, pCreate->tableId, pMsg->retry,
tstrerror(terrno));
return terrno;
} else {
......@@ -1758,7 +1758,7 @@ static int32_t mnodeAddNormalTableColumn(SMnodeMsg *pMsg, SSchema schema[], int3
SChildTableObj *pTable = (SChildTableObj *)pMsg->pTable;
SDbObj *pDb = pMsg->pDb;
if (ncols <= 0) {
mError("app:%p:%p, ctable:%s, add column, ncols:%d <= 0", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId);
mError("app:%p:%p, ctable:%s, add column, ncols:%d <= 0", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId, ncols);
return TSDB_CODE_MND_APP_ERROR;
}
......@@ -2023,7 +2023,7 @@ static void mnodeDropAllChildTablesInStable(SSuperTableObj *pStable) {
int32_t numOfTables = 0;
SChildTableObj *pTable = NULL;
mPrint("stable:%s, all child tables will dropped from sdb", pStable->info.tableId, numOfTables);
mPrint("stable:%s, all child tables(%d) will dropped from sdb", pStable->info.tableId, numOfTables);
while (1) {
pIter = mnodeGetNextChildTable(pIter, &pTable);
......
......@@ -739,7 +739,7 @@ static int32_t mnodeProcessVnodeCfgMsg(SMnodeMsg *pMsg) {
SDnodeObj *pDnode = mnodeGetDnode(pCfg->dnodeId);
if (pDnode == NULL) {
mTrace("dnode:%s, invalid dnode", taosIpStr(pCfg->dnodeId), pCfg->vgId);
mTrace("dnode:%s, vgId:%d, invalid dnode", taosIpStr(pCfg->dnodeId), pCfg->vgId);
return TSDB_CODE_MND_VGROUP_NOT_EXIST;
}
mnodeDecDnodeRef(pDnode);
......
......@@ -70,7 +70,7 @@ bool httpInitContexts() {
void httpCleanupContexts() {
if (tsHttpServer.contextCache != NULL) {
SCacheObj *cache = tsHttpServer.contextCache;
httpPrint("context cache is cleanuping, size:%d", taosHashGetSize(cache->pHashTable));
httpPrint("context cache is cleanuping, size:%zu", taosHashGetSize(cache->pHashTable));
taosCacheCleanup(tsHttpServer.contextCache);
tsHttpServer.contextCache = NULL;
}
......
......@@ -139,7 +139,7 @@ int httpWriteJsonBufBody(JsonBuf* buf, bool isTheLast) {
return 0; // there is no data to dump.
}
} else {
httpError("context:%p, fd:%d, ip:%s, failed to compress data, chunkSize:%d, last:%d, error:%d, response:\n%s",
httpError("context:%p, fd:%d, ip:%s, failed to compress data, chunkSize:%" PRIu64 ", last:%d, error:%d, response:\n%s",
buf->pContext, buf->pContext->fd, buf->pContext->ipstr, srcLen, isTheLast, ret, buf->buf);
return 0;
}
......
......@@ -108,7 +108,7 @@ static void httpDestroySession(void *data) {
void httpCleanUpSessions() {
if (tsHttpServer.sessionCache != NULL) {
SCacheObj *cache = tsHttpServer.sessionCache;
httpPrint("session cache is cleanuping, size:%d", taosHashGetSize(cache->pHashTable));
httpPrint("session cache is cleanuping, size:%zu", taosHashGetSize(cache->pHashTable));
taosCacheCleanup(tsHttpServer.sessionCache);
tsHttpServer.sessionCache = NULL;
}
......
......@@ -208,7 +208,7 @@ static void monitorInitDatabase() {
static void monitorInitDatabaseCb(void *param, TAOS_RES *result, int32_t code) {
if (-code == TSDB_CODE_MND_TABLE_ALREADY_EXIST || -code == TSDB_CODE_MND_DB_ALREADY_EXIST || code >= 0) {
monitorTrace("monitor:%p, sql success, reason:%d, %s", tsMonitorConn.conn, tstrerror(code), tsMonitorConn.sql);
monitorTrace("monitor:%p, sql success, reason:%s, %s", tsMonitorConn.conn, tstrerror(code), tsMonitorConn.sql);
if (tsMonitorConn.cmdIndex == MONITOR_CMD_CREATE_TB_LOG) {
monitorPrint("dnode:%s is started", tsLocalEp);
}
......
......@@ -273,7 +273,7 @@ static bool limitResults(SQueryRuntimeEnv* pRuntimeEnv) {
if ((pQuery->limit.limit > 0) && (pQuery->rec.total + pQuery->rec.rows > pQuery->limit.limit)) {
pQuery->rec.rows = pQuery->limit.limit - pQuery->rec.total;
qTrace("QInfo:%p discard remain data due to result limitation, limit:%"PRId64", current return:%d, total:%"PRId64,
qTrace("QInfo:%p discard remain data due to result limitation, limit:%"PRId64", current return:%" PRId64 ", total:%"PRId64,
pQInfo, pQuery->limit.limit, pQuery->rec.rows, pQuery->rec.total + pQuery->rec.rows);
assert(pQuery->rec.rows >= 0);
setQueryStatus(pQuery, QUERY_COMPLETED);
......@@ -2060,7 +2060,7 @@ static void ensureOutputBufferSimple(SQueryRuntimeEnv* pRuntimeEnv, int32_t capa
pRuntimeEnv->pCtx[i].aOutputBuf = pQuery->sdata[i]->data;
}
qTrace("QInfo:%p realloc output buffer to inc output buffer from: %d rows to:%d rows", GET_QINFO_ADDR(pRuntimeEnv),
qTrace("QInfo:%p realloc output buffer to inc output buffer from: %" PRId64 " rows to:%d rows", GET_QINFO_ADDR(pRuntimeEnv),
pQuery->rec.capacity, capacity);
pQuery->rec.capacity = capacity;
......@@ -2096,7 +2096,7 @@ static void ensureOutputBuffer(SQueryRuntimeEnv* pRuntimeEnv, SDataBlockInfo* pB
}
}
qTrace("QInfo:%p realloc output buffer, new size: %d rows, old:%d, remain:%d", GET_QINFO_ADDR(pRuntimeEnv),
qTrace("QInfo:%p realloc output buffer, new size: %d rows, old:%" PRId64 ", remain:%" PRId64, GET_QINFO_ADDR(pRuntimeEnv),
newSize, pRec->capacity, newSize - pRec->rows);
pRec->capacity = newSize;
......@@ -2270,8 +2270,8 @@ void setTagVal(SQueryRuntimeEnv *pRuntimeEnv, STableId* pTableId, void *tsdb) {
}
doSetTagValueInParam(tsdb, pTableId, pExprInfo->base.arg->argValue.i64, &pRuntimeEnv->pCtx[0].tag, type, bytes);
qTrace("QInfo:%p set tag value for join comparison, colId:%d, val:%"PRId64, pQInfo, pExprInfo->base.arg->argValue.i64,
pRuntimeEnv->pCtx[0].tag)
qTrace("QInfo:%p set tag value for join comparison, colId:%" PRId64 ", val:%"PRId64, pQInfo, pExprInfo->base.arg->argValue.i64,
pRuntimeEnv->pCtx[0].tag.i64Key)
}
}
}
......@@ -2494,7 +2494,7 @@ int32_t mergeIntoGroupResult(SQInfo *pQInfo) {
qTrace("QInfo:%p no result in group %d, continue", pQInfo, pQInfo->groupIndex - 1);
}
qTrace("QInfo:%p merge res data into group, index:%d, total group:%d, elapsed time:%lldms", pQInfo,
qTrace("QInfo:%p merge res data into group, index:%d, total group:%d, elapsed time:%" PRId64 "ms", pQInfo,
pQInfo->groupIndex - 1, numOfGroups, taosGetTimestampMs() - st);
return TSDB_CODE_SUCCESS;
......@@ -2952,7 +2952,7 @@ void skipResults(SQueryRuntimeEnv *pRuntimeEnv) {
}
if (pQuery->rec.rows <= pQuery->limit.offset) {
qTrace("QInfo:%p skip rows:%d, new offset:%" PRIu64, GET_QINFO_ADDR(pRuntimeEnv), pQuery->rec.rows,
qTrace("QInfo:%p skip rows:%" PRId64 ", new offset:%" PRIu64, GET_QINFO_ADDR(pRuntimeEnv), pQuery->rec.rows,
pQuery->limit.offset - pQuery->rec.rows);
pQuery->limit.offset -= pQuery->rec.rows;
......@@ -3696,7 +3696,7 @@ int32_t doFillGapsInResults(SQueryRuntimeEnv* pRuntimeEnv, tFilePage **pDst, int
}
if (pQuery->limit.offset < ret) {
qTrace("QInfo:%p initial numOfRows:%d, generate filled result:%d rows, offset:%d. Discard due to offset, remain:%d, new offset:%d",
qTrace("QInfo:%p initial numOfRows:%d, generate filled result:%d rows, offset:%" PRId64 ". Discard due to offset, remain:%" PRId64 ", new offset:%d",
pQInfo, pFillInfo->numOfRows, ret, pQuery->limit.offset, ret - pQuery->limit.offset, 0);
ret -= pQuery->limit.offset;
......@@ -3710,8 +3710,8 @@ int32_t doFillGapsInResults(SQueryRuntimeEnv* pRuntimeEnv, tFilePage **pDst, int
pQuery->limit.offset = 0;
return ret;
} else {
qTrace("QInfo:%p initial numOfRows:%d, generate filled result:%d rows, offset:%d. Discard due to offset, "
"remain:%d, new offset:%d", pQInfo, pFillInfo->numOfRows, ret, pQuery->limit.offset, 0,
qTrace("QInfo:%p initial numOfRows:%d, generate filled result:%d rows, offset:%" PRId64 ". Discard due to offset, "
"remain:%d, new offset:%" PRId64, pQInfo, pFillInfo->numOfRows, ret, pQuery->limit.offset, 0,
pQuery->limit.offset - ret);
pQuery->limit.offset -= ret;
......@@ -4259,7 +4259,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) {
while (pQInfo->groupIndex < numOfGroups) {
SArray* group = taosArrayGetP(pQInfo->groupInfo.pGroupList, pQInfo->groupIndex);
qTrace("QInfo:%p last_row query on group:%d, total group:%u, current group:%p", pQInfo, pQInfo->groupIndex,
qTrace("QInfo:%p last_row query on group:%d, total group:%zu, current group:%p", pQInfo, pQInfo->groupIndex,
numOfGroups, group);
STsdbQueryCond cond = {
......@@ -4324,7 +4324,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) {
while (pQInfo->groupIndex < numOfGroups) {
SArray* group = taosArrayGetP(pQInfo->groupInfo.pGroupList, pQInfo->groupIndex);
qTrace("QInfo:%p group by normal columns group:%d, total group:%d", pQInfo, pQInfo->groupIndex, numOfGroups);
qTrace("QInfo:%p group by normal columns group:%d, total group:%zu", pQInfo, pQInfo->groupIndex, numOfGroups);
STsdbQueryCond cond = {
.twindow = pQuery->window,
......@@ -4510,7 +4510,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) {
}
qTrace(
"QInfo %p numOfTables:%"PRIu64", index:%d, numOfGroups:%d, %"PRId64" points returned, total:%"PRId64", offset:%" PRId64,
"QInfo %p numOfTables:%"PRIu64", index:%d, numOfGroups:%zu, %"PRId64" points returned, total:%"PRId64", offset:%" PRId64,
pQInfo, pQInfo->groupInfo.numOfTables, pQInfo->tableIndex, numOfGroups, pQuery->rec.rows, pQuery->rec.total,
pQuery->limit.offset);
}
......@@ -4606,7 +4606,7 @@ static void multiTableQueryProcess(SQInfo *pQInfo) {
// do check all qualified data blocks
int64_t el = scanMultiTableDataBlocks(pQInfo);
qTrace("QInfo:%p master scan completed, elapsed time: %lldms, reverse scan start", pQInfo, el);
qTrace("QInfo:%p master scan completed, elapsed time: %" PRId64 "ms, reverse scan start", pQInfo, el);
// query error occurred or query is killed, abort current execution
if (pQInfo->code != TSDB_CODE_SUCCESS || isQueryKilled(pQInfo)) {
......@@ -4621,7 +4621,7 @@ static void multiTableQueryProcess(SQInfo *pQInfo) {
doSaveContext(pQInfo);
el = scanMultiTableDataBlocks(pQInfo);
qTrace("QInfo:%p reversed scan completed, elapsed time: %lldms", pQInfo, el);
qTrace("QInfo:%p reversed scan completed, elapsed time: %" PRId64 "ms", pQInfo, el);
doRestoreContext(pQInfo);
} else {
......@@ -4648,7 +4648,7 @@ static void multiTableQueryProcess(SQInfo *pQInfo) {
}
// handle the limitation of output buffer
qTrace("QInfo:%p points returned:%d, total:%d", pQInfo, pQuery->rec.rows, pQuery->rec.total + pQuery->rec.rows);
qTrace("QInfo:%p points returned:%" PRId64 ", total:%" PRId64, pQInfo, pQuery->rec.rows, pQuery->rec.total + pQuery->rec.rows);
}
/*
......@@ -4720,8 +4720,8 @@ static void tableMultiOutputProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo)
break;
}
qTrace("QInfo:%p vid:%d sid:%d id:%s, skip current result, offset:%" PRId64 ", next qrange:%" PRId64 "-%" PRId64,
pQInfo, pQuery->limit.offset, pQuery->current->lastKey);
qTrace("QInfo:%p skip current result, offset:%" PRId64 ", next qrange:%" PRId64 "-%" PRId64,
pQInfo, pQuery->limit.offset, pQuery->current->lastKey, pQuery->current->win.ekey);
resetCtxOutputBuf(pRuntimeEnv);
}
......@@ -4849,7 +4849,7 @@ static void tableQueryImpl(SQInfo *pQInfo) {
limitResults(pRuntimeEnv);
}
qTrace("QInfo:%p current:%d returned, total:%d", pQInfo, pQuery->rec.rows, pQuery->rec.total);
qTrace("QInfo:%p current:%" PRId64 " returned, total:%" PRId64, pQInfo, pQuery->rec.rows, pQuery->rec.total);
return;
}
......@@ -4931,7 +4931,7 @@ static void stableQueryImpl(SQInfo *pQInfo) {
pQInfo->runtimeEnv.summary.elapsedTime += (taosGetTimestampUs() - st);
if (pQuery->rec.rows == 0) {
qTrace("QInfo:%p over, %d tables queried, %"PRId64" rows are returned", pQInfo, pQInfo->groupInfo.numOfTables, pQuery->rec.total);
qTrace("QInfo:%p over, %zu tables queried, %"PRId64" rows are returned", pQInfo, pQInfo->groupInfo.numOfTables, pQuery->rec.total);
}
}
......@@ -5233,7 +5233,7 @@ static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList,
}
static int32_t buildAirthmeticExprFromMsg(SExprInfo *pArithExprInfo, SQueryTableMsg *pQueryMsg) {
qTrace("qmsg:%p create arithmetic expr from binary string", pQueryMsg, pArithExprInfo->base.arg[0].argValue.pz);
qTrace("qmsg:%p create arithmetic expr from binary string: %s", pQueryMsg, pArithExprInfo->base.arg[0].argValue.pz);
tExprNode* pExprNode = NULL;
TRY(32) {
......@@ -5853,7 +5853,7 @@ static int32_t doDumpQueryResult(SQInfo *pQInfo, char *data) {
}
pQuery->rec.total += pQuery->rec.rows;
qTrace("QInfo:%p current numOfRes rows:%d, total:%d", pQInfo, pQuery->rec.rows, pQuery->rec.total);
qTrace("QInfo:%p current numOfRes rows:%" PRId64 ", total:%" PRId64, pQInfo, pQuery->rec.rows, pQuery->rec.total);
if (pQuery->limit.limit > 0 && pQuery->limit.limit == pQuery->rec.total) {
qTrace("QInfo:%p results limitation reached, limitation:%"PRId64, pQInfo, pQuery->limit.limit);
......@@ -5939,7 +5939,7 @@ int32_t qCreateQueryInfo(void *tsdb, int32_t vgId, SQueryTableMsg *pQueryMsg, qi
SArray* p = taosArrayClone(pTableIdList);
taosArrayPush(groupInfo.pGroupList, &p);
qTrace("qmsg:%p query on %d tables in one group from client", pQueryMsg, groupInfo.numOfTables);
qTrace("qmsg:%p query on %zu tables in one group from client", pQueryMsg, groupInfo.numOfTables);
}
} else {
assert(0);
......
......@@ -696,7 +696,7 @@ static SRpcConn *rpcGetConnObj(SRpcInfo *pRpc, int sid, SRecvInfo *pRecv) {
if (pConn) {
if (pConn->linkUid != pHead->linkUid) {
terrno = TSDB_CODE_RPC_MISMATCHED_LINK_ID;
tError("%s %p %p, linkUid:0x%x is not matched with received:0x%x", pRpc->label, pConn, pHead->ahandle, pConn->linkUid, pHead->linkUid);
tError("%s %p %p, linkUid:0x%x is not matched with received:0x%x", pRpc->label, pConn, (void*)pHead->ahandle, pConn->linkUid, pHead->linkUid);
pConn = NULL;
}
}
......
......@@ -299,7 +299,7 @@ void *taosInitTcpClient(uint32_t ip, uint16_t port, char *label, int num, void *
return NULL;
}
tTrace("%s TCP client is initialized, ip:%s:%hu", label, ip, port);
tTrace("%s TCP client is initialized, ip:%u:%hu", label, ip, port);
return pThreadObj;
}
......
......@@ -420,7 +420,7 @@ int tsdbUpdateTagValue(TsdbRepoT *repo, SUpdateTableTagValMsg *pMsg) {
if (pTable->type != TSDB_CHILD_TABLE) {
tsdbError("vgId:%d failed to update tag value of table %s since its type is %d", pRepo->config.tsdbId,
varDataVal(pTable->name), pTable->type);
pTable->name->data, pTable->type);
return TSDB_CODE_TDB_INVALID_TABLE_TYPE;
}
......@@ -450,7 +450,7 @@ int tsdbUpdateTagValue(TsdbRepoT *repo, SUpdateTableTagValMsg *pMsg) {
tsdbError(
"vgId:%d failed to update tag value of table %s since version out of date, client tag version:%d server tag "
"version:%d",
pRepo->config.tsdbId, varDataVal(pTable->name), tversion, schemaVersion(pTable->tagSchema));
pRepo->config.tsdbId, pTable->name->data, tversion, schemaVersion(pTable->tagSchema));
return TSDB_CODE_TDB_TAG_VER_OUT_OF_DATE;
}
if (schemaColAt(pTagSchema, DEFAULT_TAG_INDEX_COLUMN)->colId == htons(pMsg->colId)) {
......@@ -945,7 +945,7 @@ static int32_t tdInsertRowToTable(STsdbRepo *pRepo, SDataRow row, STable *pTable
pTable->mem->numOfRows = tSkipListGetSize(pTable->mem->pData);
tsdbTrace("vgId:%d, tid:%d, uid:%" PRId64 ", table:%s a row is inserted to table! key:%" PRId64, pRepo->config.tsdbId,
pTable->tableId.tid, pTable->tableId.uid, varDataVal(pTable->name), dataRowKey(row));
pTable->tableId.tid, pTable->tableId.uid, pTable->name->data, dataRowKey(row));
return 0;
}
......@@ -958,7 +958,7 @@ static int32_t tsdbInsertDataToTable(TsdbRepoT *repo, SSubmitBlk *pBlock, TSKEY
STableId tableId = {.uid = pBlock->uid, .tid = pBlock->tid};
STable *pTable = tsdbIsValidTableToInsert(pRepo->tsdbMeta, tableId);
if (pTable == NULL) {
tsdbError("vgId:%d, failed to get table for insert, uid:" PRIu64 ", tid:%d", pRepo->config.tsdbId, pBlock->uid,
tsdbError("vgId:%d, failed to get table for insert, uid:%" PRIu64 ", tid:%d", pRepo->config.tsdbId, pBlock->uid,
pBlock->tid);
return TSDB_CODE_TDB_INVALID_TABLE_ID;
}
......@@ -970,7 +970,7 @@ static int32_t tsdbInsertDataToTable(TsdbRepoT *repo, SSubmitBlk *pBlock, TSKEY
int16_t nversion = schemaVersion(pSchema);
if (tversion > nversion) {
tsdbTrace("vgId:%d table:%s tid:%d server schema version %d is older than clien version %d, try to config.",
pRepo->config.tsdbId, varDataVal(pTable->name), pTable->tableId.tid, nversion, tversion);
pRepo->config.tsdbId, pTable->name->data, pTable->tableId.tid, nversion, tversion);
void *msg = (*pRepo->appH.configFunc)(pRepo->config.tsdbId, pTable->tableId.tid);
if (msg == NULL) {
return terrno;
......@@ -993,7 +993,7 @@ static int32_t tsdbInsertDataToTable(TsdbRepoT *repo, SSubmitBlk *pBlock, TSKEY
} else {
if (tsdbGetTableSchemaByVersion(pMeta, pTable, tversion) == NULL) {
tsdbError("vgId:%d table:%s tid:%d invalid schema version %d from client", pRepo->config.tsdbId,
varDataVal(pTable->name), pTable->tableId.tid, tversion);
pTable->name->data, pTable->tableId.tid, tversion);
return TSDB_CODE_TDB_TABLE_SCHEMA_VERSION;
}
}
......@@ -1007,9 +1007,9 @@ static int32_t tsdbInsertDataToTable(TsdbRepoT *repo, SSubmitBlk *pBlock, TSKEY
tsdbInitSubmitBlkIter(pBlock, &blkIter);
while ((row = tsdbGetSubmitBlkNext(&blkIter)) != NULL) {
if (dataRowKey(row) < minKey || dataRowKey(row) > maxKey) {
tsdbError("vgId:%d, table:%s, tid:%d, talbe uid:%ld timestamp is out of range. now:" PRId64 ", maxKey:" PRId64
", minKey:" PRId64,
pRepo->config.tsdbId, varDataVal(pTable->name), pTable->tableId.tid, pTable->tableId.uid, now, minKey, maxKey);
tsdbError("vgId:%d, table:%s, tid:%d, talbe uid:%ld timestamp is out of range. now:%" PRId64 ", maxKey:%" PRId64
", minKey:%" PRId64,
pRepo->config.tsdbId, pTable->name->data, pTable->tableId.tid, pTable->tableId.uid, now, minKey, maxKey);
return TSDB_CODE_TDB_TIMESTAMP_OUT_OF_RANGE;
}
......@@ -1279,7 +1279,7 @@ static int tsdbHasDataToCommit(SSkipListIterator **iters, int nIters, TSKEY minK
static void tsdbAlterCompression(STsdbRepo *pRepo, int8_t compression) {
int8_t oldCompRession = pRepo->config.compression;
pRepo->config.compression = compression;
tsdbTrace("vgId:%d, tsdb compression is changed from %d to %d", oldCompRession, compression);
tsdbTrace("tsdb compression is changed from %d to %d", oldCompRession, compression);
}
static void tsdbAlterKeep(STsdbRepo *pRepo, int32_t keep) {
......
......@@ -451,7 +451,7 @@ int tsdbCreateTable(TsdbRepoT *repo, STableCfg *pCfg) {
STable *pTable = tsdbGetTableByUid(pMeta, pCfg->tableId.uid);
if (pTable != NULL) {
tsdbError("vgId:%d table %s already exists, tid %d uid %" PRId64, pRepo->config.tsdbId, varDataVal(pTable->name),
tsdbError("vgId:%d table %s already exists, tid %d uid %" PRId64, pRepo->config.tsdbId, pTable->name->data,
pTable->tableId.tid, pTable->tableId.uid);
return TSDB_CODE_TDB_TABLE_ALREADY_EXIST;
}
......@@ -485,11 +485,11 @@ int tsdbCreateTable(TsdbRepoT *repo, STableCfg *pCfg) {
// Register to meta
if (newSuper) {
tsdbAddTableToMeta(pMeta, super, true);
tsdbTrace("vgId:%d, super table %s is created! uid:%" PRId64, pRepo->config.tsdbId, varDataVal(super->name),
tsdbTrace("vgId:%d, super table %s is created! uid:%" PRId64, pRepo->config.tsdbId, super->name->data,
super->tableId.uid);
}
tsdbAddTableToMeta(pMeta, table, true);
tsdbTrace("vgId:%d, table %s is created! tid:%d, uid:%" PRId64, pRepo->config.tsdbId, varDataVal(table->name),
tsdbTrace("vgId:%d, table %s is created! tid:%d, uid:%" PRId64, pRepo->config.tsdbId, table->name->data,
table->tableId.tid, table->tableId.uid);
// Write to meta file
......@@ -595,7 +595,7 @@ int tsdbDropTable(TsdbRepoT *repo, STableId tableId) {
STable *pTable = tsdbGetTableByUid(pMeta, tableId.uid);
if (pTable == NULL) {
tsdbError("vgId:%d, failed to drop table since table not exists! tid:%d, uid:" PRId64, pRepo->config.tsdbId,
tsdbError("vgId:%d, failed to drop table since table not exists! tid:%d, uid:%" PRId64, pRepo->config.tsdbId,
tableId.tid, tableId.uid);
return -1;
}
......@@ -604,7 +604,7 @@ int tsdbDropTable(TsdbRepoT *repo, STableId tableId) {
pRepo->appH.cqDropFunc(pTable->cqhandle);
}
tsdbTrace("vgId:%d, table %s is dropped! tid:%d, uid:%" PRId64, pRepo->config.tsdbId, varDataVal(pTable->name),
tsdbTrace("vgId:%d, table %s is dropped! tid:%d, uid:%" PRId64, pRepo->config.tsdbId, pTable->name->data,
tableId.tid, tableId.uid);
if (tsdbRemoveTableFromMeta(pMeta, pTable, true) < 0) return -1;
......
......@@ -195,7 +195,7 @@ TsdbQueryHandleT* tsdbQueryTables(TsdbRepoT* tsdb, STsdbQueryCond* pCond, STable
}
}
tsdbTrace("%p total numOfTable:%d in query", pQueryHandle, taosArrayGetSize(pQueryHandle->pTableCheckInfo));
tsdbTrace("%p total numOfTable:%zu in query", pQueryHandle, taosArrayGetSize(pQueryHandle->pTableCheckInfo));
tsdbInitDataBlockLoadInfo(&pQueryHandle->dataBlockLoadInfo);
tsdbInitCompBlockLoadInfo(&pQueryHandle->compBlockLoadInfo);
......@@ -1095,7 +1095,7 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
cur->rows = numOfRows;
cur->pos = pos;
tsdbTrace("%p uid:%" PRIu64",tid:%d data block created, brange:%"PRIu64"-%"PRIu64" %p", pQueryHandle, cur->win.skey,
tsdbTrace("%p uid:%" PRIu64",tid:%d data block created, brange:%"PRIu64"-%"PRIu64" rows:%d, %p", pQueryHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, cur->win.skey,
cur->win.ekey, cur->rows, pQueryHandle->qinfo);
}
......@@ -1195,7 +1195,7 @@ static int32_t dataBlockOrderCompar(const void* pLeft, const void* pRight, void*
if (pLeftBlockInfoEx->compBlock->offset == pRightBlockInfoEx->compBlock->offset &&
pLeftBlockInfoEx->compBlock->last == pRightBlockInfoEx->compBlock->last) {
// todo add more information
tsdbError("error in header file, two block with same offset:%p", pLeftBlockInfoEx->compBlock->offset);
tsdbError("error in header file, two block with same offset:%" PRId64, (int64_t)pLeftBlockInfoEx->compBlock->offset);
}
return pLeftBlockInfoEx->compBlock->offset > pRightBlockInfoEx->compBlock->offset ? 1 : -1;
......@@ -2020,7 +2020,7 @@ SArray* createTableGroup(SArray* pTableList, STSchema* pTagSchema, SColIndex* pC
}
taosArrayPush(pTableGroup, &sa);
tsdbTrace("all %d tables belong to one group", size);
tsdbTrace("all %zu tables belong to one group", size);
} else {
STableGroupSupporter *pSupp = (STableGroupSupporter *) calloc(1, sizeof(STableGroupSupporter));
pSupp->tsdbMeta = tsdbGetMeta(tsdb);
......@@ -2125,7 +2125,7 @@ int32_t tsdbQuerySTableByTagCond(TsdbRepoT* tsdb, uint64_t uid, const char* pTag
if (pTable->type != TSDB_SUPER_TABLE) {
tsdbError("%p query normal tag not allowed, uid:%" PRIu64 ", tid:%d, name:%s", tsdb, uid, pTable->tableId.tid,
pTable->name);
pTable->name->data);
return TSDB_CODE_COM_OPS_NOT_SUPPORT; //basically, this error is caused by invalid sql issued by client
}
......@@ -2140,7 +2140,7 @@ int32_t tsdbQuerySTableByTagCond(TsdbRepoT* tsdb, uint64_t uid, const char* pTag
pGroupInfo->numOfTables = taosArrayGetSize(res);
pGroupInfo->pGroupList = createTableGroup(res, pTagSchema, pColIndex, numOfCols, tsdb);
tsdbTrace("%p no table name/tag condition, all tables belong to one group, numOfTables:%d", tsdb, pGroupInfo->numOfTables);
tsdbTrace("%p no table name/tag condition, all tables belong to one group, numOfTables:%zu", tsdb, pGroupInfo->numOfTables);
} else {
// todo add error
}
......@@ -2184,7 +2184,7 @@ int32_t tsdbQuerySTableByTagCond(TsdbRepoT* tsdb, uint64_t uid, const char* pTag
pGroupInfo->numOfTables = taosArrayGetSize(res);
pGroupInfo->pGroupList = createTableGroup(res, pTagSchema, pColIndex, numOfCols, tsdb);
tsdbTrace("%p stable tid:%d, uid:%"PRIu64" query, numOfTables:%d, belong to %d groups", tsdb, pTable->tableId.tid,
tsdbTrace("%p stable tid:%d, uid:%"PRIu64" query, numOfTables:%zu, belong to %zu groups", tsdb, pTable->tableId.tid,
pTable->tableId.uid, pGroupInfo->numOfTables, taosArrayGetSize(pGroupInfo->pGroupList));
taosArrayDestroy(res);
......
......@@ -32,8 +32,18 @@ int32_t taosInitLog(char *logName, int32_t numOfLogLines, int32_t maxFiles);
void taosCloseLog();
void taosResetLog();
void taosPrintLog(const char *const flags, int32_t dflag, const char *const format, ...);
void taosPrintLongString(const char *const flags, int32_t dflag, const char *const format, ...);
void taosPrintLog(const char *flags, int32_t dflag, const char *format, ...)
#ifdef __GNUC__
__attribute__((format(printf, 3, 4)))
#endif
;
void taosPrintLongString(const char * flags, int32_t dflag, const char *format, ...)
#ifdef __GNUC__
__attribute__((format(printf, 3, 4)))
#endif
;
void taosDumpData(unsigned char *msg, int32_t len);
#ifdef __cplusplus
......
......@@ -117,7 +117,7 @@ SKVStore *tdOpenKVStore(char *fname, iterFunc iFunc, afterFunc aFunc, void *appH
if (tdLoadKVStoreHeader(pStore->sfd, pStore->fsnap, &info) < 0) goto _err;
if (ftruncate(pStore->fd, info.size) < 0) {
uError("failed to truncate %s to " PRId64 " size since %s", pStore->fname, info.size, strerror(errno));
uError("failed to truncate %s to %" PRId64 " size since %s", pStore->fname, info.size, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
......@@ -245,7 +245,7 @@ int tdDropKVStoreRecord(SKVStore *pStore, uint64_t uid) {
SKVRecord *pRecord = taosHashGet(pStore->map, &uid, sizeof(uid));
if (pRecord == NULL) {
uError("failed to drop KV store record with key " PRIu64 " since not find", uid);
uError("failed to drop KV store record with key %" PRIu64 " since not find", uid);
return -1;
}
......@@ -256,7 +256,7 @@ int tdDropKVStoreRecord(SKVStore *pStore, uint64_t uid) {
void *pBuf = tdEncodeKVRecord(buf, &rInfo);
if (twrite(pStore->fd, buf, POINTER_DISTANCE(pBuf, buf)) < POINTER_DISTANCE(pBuf, buf)) {
uError("failed to write %d bytes to file %s since %s", POINTER_DISTANCE(pBuf, buf), pStore->fname, strerror(errno));
uError("failed to write %" PRId64 " bytes to file %s since %s", POINTER_DISTANCE(pBuf, buf), pStore->fname, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
return -1;
}
......@@ -456,7 +456,7 @@ static int tdRestoreKVStore(SKVStore *pStore) {
ssize_t tsize = tread(pStore->fd, tbuf, sizeof(SKVRecord));
if (tsize == 0) break;
if (tsize < sizeof(SKVRecord)) {
uError("failed to read %d bytes from file %s since %s", sizeof(SKVRecord), pStore->fname, strerror(errno));
uError("failed to read %zu bytes from file %s since %s", sizeof(SKVRecord), pStore->fname, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
......@@ -514,13 +514,13 @@ static int tdRestoreKVStore(SKVStore *pStore) {
}
if (tread(pStore->fd, buf, pRecord->size) < pRecord->size) {
uError("failed to read %d bytes from file %s since %s", pRecord->size, pStore->fname, strerror(errno));
uError("failed to read %" PRId64 " bytes from file %s since %s", pRecord->size, pStore->fname, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
if (!taosCheckChecksumWhole((uint8_t *)buf, pRecord->size)) {
uError("file %s has checksum error, offset " PRId64 " size %d", pStore->fname, pRecord->offset, pRecord->size);
uError("file %s has checksum error, offset %" PRId64 " size %" PRId64, pStore->fname, pRecord->offset, pRecord->size);
terrno = TSDB_CODE_COM_FILE_CORRUPTED;
goto _err;
}
......
......@@ -309,7 +309,7 @@ static int32_t taosOpenLogFile(char *fn, int32_t maxLines, int32_t maxFileNum) {
return 0;
}
void taosPrintLog(const char *const flags, int32_t dflag, const char *const format, ...) {
void taosPrintLog(const char *flags, int32_t dflag, const char *format, ...) {
if (tsTotalLogDirGB != 0 && tsAvailLogDirGB < tsMinimalLogDirGB) {
printf("server disk:%s space remain %.3f GB, total %.1f GB, stop print log.\n", tsLogDir, tsAvailLogDirGB, tsTotalLogDirGB);
fflush(stdout);
......@@ -396,7 +396,7 @@ void taosDumpData(unsigned char *msg, int32_t len) {
return;
}
void taosPrintLongString(const char *const flags, int32_t dflag, const char *const format, ...) {
void taosPrintLongString(const char *flags, int32_t dflag, const char *format, ...) {
if (tsTotalLogDirGB != 0 && tsAvailLogDirGB < tsMinimalLogDirGB) {
printf("server disk:%s space remain %.3f GB, total %.1f GB, stop write log.\n", tsLogDir, tsAvailLogDirGB, tsTotalLogDirGB);
fflush(stdout);
......
......@@ -439,7 +439,7 @@ void vnodeSetAccess(SDMVgroupAccess *pAccess, int32_t numOfVnodes) {
if (pVnode != NULL) {
pVnode->accessState = pAccess[i].accessState;
if (pVnode->accessState != TSDB_VN_ALL_ACCCESS) {
vTrace("vgId:%d, access state is set to %d", pAccess[i].vgId)
vTrace("vgId:%d, access state is set to %d", pAccess[i].vgId, pVnode->accessState)
}
vnodeRelease(pVnode);
}
......@@ -734,7 +734,7 @@ static int32_t vnodeReadCfg(SVnodeObj *pVnode) {
cJSON *quorum = cJSON_GetObjectItem(root, "quorum");
if (!quorum || quorum->type != cJSON_Number) {
vError("failed to read vnode cfg, quorum not found", pVnode->vgId);
vError("vgId: %d, failed to read vnode cfg, quorum not found", pVnode->vgId);
goto PARSE_OVER;
}
pVnode->syncCfg.quorum = (int8_t)quorum->valueint;
......
......@@ -86,7 +86,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
killQueryMsg->free = htons(killQueryMsg->free);
killQueryMsg->qhandle = htobe64(killQueryMsg->qhandle);
vWarn("QInfo:%p connection %p broken, kill query", killQueryMsg->qhandle, pReadMsg->rpcMsg.handle);
vWarn("QInfo:%p connection %p broken, kill query", (void*)killQueryMsg->qhandle, pReadMsg->rpcMsg.handle);
assert(pReadMsg->rpcMsg.contLen > 0 && killQueryMsg->free == 1);
// this message arrived here by means of the query message, so release the vnode is necessary
......
......@@ -110,7 +110,7 @@ void createDbAndTable() {
for (int64_t t = 0; t < totalTables; ++t) {
sprintf(qstr, "create table if not exists %s%ld using %s tags(%ld)", stableName, t, stableName, t);
if (taos_query(con, qstr)) {
pError("failed to create table %s%d, reason:%s", stableName, t, taos_errstr(con));
pError("failed to create table %s%" PRId64 ", reason:%s", stableName, t, taos_errstr(con));
exit(0);
}
}
......@@ -141,7 +141,7 @@ void insertData() {
gettimeofday(&systemTime, NULL);
st = systemTime.tv_sec * 1000000 + systemTime.tv_usec;
pPrint("%d threads are spawned to import data", numOfThreads);
pPrint("%" PRId64 " threads are spawned to import data", numOfThreads);
pthread_attr_t thattr;
pthread_attr_init(&thattr);
......@@ -323,8 +323,8 @@ void shellParseArgument(int argc, char *argv[]) {
pPrint("%spointsPerTable:%" PRId64 "%s", GREEN, pointsPerTable, NC);
pPrint("%snumOfThreads:%" PRId64 "%s", GREEN, numOfThreads, NC);
pPrint("%snumOfTablesPerThread:%" PRId64 "%s", GREEN, numOfTablesPerThread, NC);
pPrint("%scache:%" PRId64 "%s", GREEN, cache, NC);
pPrint("%stables:%" PRId64 "%s", GREEN, tables, NC);
pPrint("%scache:%d%s", GREEN, cache, NC);
pPrint("%stables:%d%s", GREEN, tables, NC);
pPrint("%sdbName:%s%s", GREEN, dbName, NC);
pPrint("%stableName:%s%s", GREEN, stableName, NC);
pPrint("%sstart to run%s", GREEN, NC);
......
......@@ -119,7 +119,7 @@ void insertData() {
gettimeofday(&systemTime, NULL);
st = systemTime.tv_sec * 1000000 + systemTime.tv_usec;
pPrint("%d threads are spawned to insert data", numOfThreads);
pPrint("%" PRId64 " threads are spawned to insert data", numOfThreads);
pthread_attr_t thattr;
pthread_attr_init(&thattr);
......@@ -202,7 +202,7 @@ void *syncTest(void *param) {
TAOS_RES *pSql = taos_query(con, qstr);
code = taos_errno(pSql);
if (code != 0) {
pError("failed to create table %s%d, reason:%s", stableName, t, taos_errstr(con));
pError("failed to create table %s%" PRId64 ", reason:%s", stableName, t, taos_errstr(con));
exit(0);
}
taos_free_result(pSql);
......@@ -348,8 +348,8 @@ void shellParseArgument(int argc, char *argv[]) {
pPrint("%spointsPerTable:%" PRId64 "%s", GREEN, pointsPerTable, NC);
pPrint("%snumOfThreads:%" PRId64 "%s", GREEN, numOfThreads, NC);
pPrint("%snumOfTablesPerThread:%" PRId64 "%s", GREEN, numOfTablesPerThread, NC);
pPrint("%scache:%" PRId64 "%s", GREEN, cache, NC);
pPrint("%stables:%" PRId64 "%s", GREEN, tables, NC);
pPrint("%scache:%" PRId32 "%s", GREEN, cache, NC);
pPrint("%stables:%" PRId32 "%s", GREEN, tables, NC);
pPrint("%sdbName:%s%s", GREEN, dbName, NC);
pPrint("%stableName:%s%s", GREEN, stableName, NC);
pPrint("%sstart to run%s", GREEN, NC);
......
......@@ -121,7 +121,7 @@ void createDbAndTable() {
pSql = taos_query(con, qstr);
code = taos_errno(pSql);
if (code != 0) {
pError("failed to create table %s%d, reason:%s", stableName, t, taos_errstr(con));
pError("failed to create table %s%" PRId64 ", reason:%s", stableName, t, taos_errstr(con));
exit(0);
}
taos_stop_query(pSql);
......@@ -158,10 +158,10 @@ void insertData() {
st = systemTime.tv_sec * 1000000 + systemTime.tv_usec;
if (rowsPerTable <= 0) {
pPrint("not insert data for rowsPerTable is :%d", rowsPerTable);
pPrint("not insert data for rowsPerTable is :%" PRId64, rowsPerTable);
exit(0);
} else {
pPrint("%d threads are spawned to insert data", numOfThreads);
pPrint("%" PRId64 " threads are spawned to insert data", numOfThreads);
}
pthread_attr_t thattr;
......@@ -348,8 +348,8 @@ void shellParseArgument(int argc, char *argv[]) {
pPrint("%spointsPerTable:%" PRId64 "%s", GREEN, pointsPerTable, NC);
pPrint("%snumOfThreads:%" PRId64 "%s", GREEN, numOfThreads, NC);
pPrint("%snumOfTablesPerThread:%" PRId64 "%s", GREEN, numOfTablesPerThread, NC);
pPrint("%scache:%" PRId64 "%s", GREEN, cache, NC);
pPrint("%stables:%" PRId64 "%s", GREEN, tables, NC);
pPrint("%scache:%" PRId32 "%s", GREEN, cache, NC);
pPrint("%stables:%" PRId32 "%s", GREEN, tables, NC);
pPrint("%sdbName:%s%s", GREEN, dbName, NC);
pPrint("%stableName:%s%s", GREEN, stableName, NC);
pPrint("%sstart to run%s", GREEN, NC);
......