Commit c41f11aa authored by hjxilinx

merge develop branch

......@@ -210,26 +210,26 @@ TDengine's JDBC driver is implemented to stay as consistent as possible with relational database drivers
## TAOS-JDBCDriver versions and the supported TDengine and JDK versions

| taos-jdbcdriver version | TDengine version | JDK version |
| --- | --- | --- |
| 1.0.3 | 1.6.1.x and above | 1.8.x |
| 1.0.2 | 1.6.1.x and above | 1.8.x |
| 1.0.1 | 1.6.1.x and above | 1.8.x |
## TDengine DataType and Java DataType

TDengine currently supports timestamp, numeric, string, and boolean types, which map to Java types as follows:

| TDengine DataType | Java DataType |
| --- | --- |
| TIMESTAMP | java.sql.Timestamp |
| INT | java.lang.Integer |
| BIGINT | java.lang.Long |
| FLOAT | java.lang.Float |
| DOUBLE | java.lang.Double |
| SMALLINT, TINYINT | java.lang.Short |
| BOOL | java.lang.Boolean |
| BINARY, NCHAR | java.lang.String |
## How to obtain TAOS-JDBCDriver
......@@ -807,6 +807,8 @@ When the HTTP request URL uses `sqlutc`, the timestamps in the returned result set are in UTC
## Go Connector
### Linux environment
#### Installing TDengine
The Go connector uses libtaos.so and taos.h, so TDengine must be installed on the machine where the program runs before the Go connector can be used, in order to provide the required driver files.
......@@ -867,7 +869,14 @@ The taosSql driver package uses cgo and calls TDengine's synchronous C/C++ interfaces; compared with
3. Create tables, write and query data

Once the database has been created, you can create tables and write or query data. The basic approach for all of these operations is the same: assemble the SQL statement, call db.Exec to run it, then check the error and handle it accordingly. Refer to the sample code above and the sketch below.
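As a minimal sketch of this pattern, the Go snippet below creates a database and a table, inserts one row, and reads it back through database/sql. The driver import path, the registered driver name "taosSql", and the DSN format are assumptions about the taosSql connector and may need to be adjusted for your installation; the database and table names are only examples.

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	// Assumed import path for the taosSql driver; adjust it to where the
	// connector package lives in your GOPATH.
	_ "github.com/taosdata/TDengine/src/connector/go/src/taosSql"
)

func main() {
	// Assumed DSN format: user:password@/tcp(host:port)/database
	db, err := sql.Open("taosSql", "root:taosdata@/tcp(127.0.0.1:0)/")
	if err != nil {
		log.Fatalf("open failed: %v", err)
	}
	defer db.Close()

	// Assemble each SQL statement, run it with db.Exec, and check the error.
	stmts := []string{
		"CREATE DATABASE IF NOT EXISTS demo",
		"CREATE TABLE IF NOT EXISTS demo.t (ts TIMESTAMP, v INT)",
		"INSERT INTO demo.t VALUES (NOW, 1)",
	}
	for _, s := range stmts {
		if _, err := db.Exec(s); err != nil {
			log.Fatalf("exec %q failed: %v", s, err)
		}
	}

	// Queries go through db.Query; scan each row into Go variables.
	rows, err := db.Query("SELECT ts, v FROM demo.t")
	if err != nil {
		log.Fatalf("query failed: %v", err)
	}
	defer rows.Close()
	for rows.Next() {
		var ts interface{} // the concrete Go type of a TIMESTAMP column depends on the driver
		var v int
		if err := rows.Scan(&ts, &v); err != nil {
			log.Fatalf("scan failed: %v", err)
		}
		fmt.Println(ts, v)
	}
}
```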
### Windows environment

To use Go on Windows, please refer to
[Compiling and using the TDengine Go Windows driver](https://www.taosdata.com/blog/2020/01/06/tdengine-go-windows%E9%A9%B1%E5%8A%A8%E7%9A%84%E7%BC%96%E8%AF%91/)
## Node.js Connector
......@@ -1054,6 +1063,8 @@ https://gitee.com/maikebing/Maikebing.EntityFrameworkCore.Taos
├───├── jdbc
├───└── python
├── driver
├───├── libtaos.dll
├───├── libtaos.dll.a
├───├── taos.dll
├───├── taos.exp
├───└── taos.lib
......@@ -1078,8 +1089,8 @@ https://gitee.com/maikebing/Maikebing.EntityFrameworkCore.Taos
+ Client executable: C:/TDengine/taos.exe
+ Configuration file: C:/TDengine/cfg/taos.cfg
+ C driver directory: C:/TDengine/driver
+ C driver header files: C:/TDengine/include
+ Driver directory: C:/TDengine/driver
+ Driver header files: C:/TDengine/include
+ JDBC driver directory: C:/TDengine/connector/jdbc
+ Go driver directory: C:/TDengine/connector/go
+ Python driver directory: C:/TDengine/connector/python
......@@ -1106,6 +1117,16 @@ taos -h <ServerIP>
The API TDengine provides on Windows is the same as on Linux. An application must include the TDengine header file taos.h, link against the TDengine library taos.lib, and have taos.dll in the executable's directory at runtime.
#### Notes on the Go interface
The API TDengine provides on Windows is the same as on Linux. Besides the Go driver package (C:\TDengine\connector\go), an application also needs to include the TDengine header file taos.h, link against the TDengine libraries libtaos.dll and libtaos.dll.a (C:\TDengine\driver), and have libtaos.dll and libtaos.dll.a in the executable's directory at runtime.
For usage, please refer to:
[Compiling and using the TDengine Go Windows driver](https://www.taosdata.com/blog/2020/01/06/tdengine-go-windows%E9%A9%B1%E5%8A%A8%E7%9A%84%E7%BC%96%E8%AF%91/)
#### Notes on the JDBC interface
On Windows, applications can use the JDBC interface to operate the database. Notes on using the JDBC interface are as follows:
......
......@@ -836,7 +836,7 @@ void tSQLBinaryExprCalcTraverse(tSQLBinaryExpr *pExprs, int32_t numOfRows, char
tSQLSyntaxNode *pRight = pExprs->pRight;
/* the left output has result from the left child syntax tree */
char *pLeftOutput = malloc(sizeof(int64_t) * numOfRows);
char *pLeftOutput = (char*)malloc(sizeof(int64_t) * numOfRows);
if (pLeft->nodeType == TSQL_NODE_EXPR) {
tSQLBinaryExprCalcTraverse(pLeft->pExpr, numOfRows, pLeftOutput, param, order, getSourceDataBlock);
}
......
......@@ -93,7 +93,7 @@ void taos_query_a(TAOS *taos, const char *sqlstr, void (*fp)(void *, TAOS_RES *,
pRes->numOfRows = 1;
strtolower(pSql->sqlstr, sqlstr);
tscTrace("%p Async SQL: %s, pObj:%p", pSql, pSql->sqlstr, pObj);
tscDump("%p pObj:%p, Async SQL: %s", pSql, pObj, pSql->sqlstr);
int32_t code = tsParseSql(pSql, true);
if (code == TSDB_CODE_ACTION_IN_PROGRESS) return;
......
......@@ -892,7 +892,9 @@ STSBuf* tsBufCreate(bool autoDelete) {
return NULL;
}
allocResForTSBuf(pTSBuf);
if (NULL == allocResForTSBuf(pTSBuf)) {
return NULL;
}
// update the header info
STSBufFileHeader header = {.magic = TS_COMP_FILE_MAGIC, .numOfVnode = pTSBuf->numOfVnodes, .tsOrder = TSQL_SO_ASC};
......
......@@ -202,10 +202,10 @@ void tscKillStream(STscObj *pObj, uint32_t killId) {
tscTrace("%p stream:%p is killed, streamId:%d", pStream->pSql, pStream, killId);
}
taos_close_stream(pStream);
if (pStream->callback) {
pStream->callback(pStream->param);
}
taos_close_stream(pStream);
}
char *tscBuildQueryStreamDesc(char *pMsg, STscObj *pObj) {
......@@ -285,8 +285,9 @@ void tscKillConnection(STscObj *pObj) {
SSqlStream *pStream = pObj->streamList;
while (pStream) {
SSqlStream *tmp = pStream->next;
taos_close_stream(pStream);
pStream = pStream->next;
pStream = tmp;
}
pthread_mutex_unlock(&pObj->mutex);
......
......@@ -2404,7 +2404,7 @@ static SColumnFilterInfo* addColumnFilterInfo(SColumnBase* pColumn) {
}
int32_t size = pColumn->numOfFilters + 1;
char* tmp = realloc(pColumn->filterInfo, sizeof(SColumnFilterInfo) * (size));
char* tmp = (char*)realloc((void*)(pColumn->filterInfo), sizeof(SColumnFilterInfo) * (size));
if (tmp != NULL) {
pColumn->filterInfo = (SColumnFilterInfo*)tmp;
}
......
......@@ -757,14 +757,14 @@ void setDCLSQLElems(SSqlInfo *pInfo, int32_t type, int32_t nParam, ...) {
pInfo->type = type;
if (nParam == 0) return;
if (pInfo->pDCLInfo == NULL) pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL));
if (pInfo->pDCLInfo == NULL) pInfo->pDCLInfo = (tDCLSQL *)calloc(1, sizeof(tDCLSQL));
va_list va;
va_start(va, nParam);
while (nParam-- > 0) {
SSQLToken *pToken = va_arg(va, SSQLToken *);
tTokenListAppend(pInfo->pDCLInfo, pToken);
(void)tTokenListAppend(pInfo->pDCLInfo, pToken);
}
va_end(va);
}
......
......@@ -613,7 +613,11 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr
rlen += pExpr->resBytes;
}
int32_t capacity = nBufferSizes / rlen;
int32_t capacity = 0;
if (rlen != 0) {
capacity = nBufferSizes / rlen;
}
pModel = tColModelCreate(pSchema, pQueryInfo->fieldsInfo.numOfOutputCols, capacity);
for (int32_t i = 0; i < pMeterMetaInfo->pMetricMeta->numOfVnodes; ++i) {
......
......@@ -32,13 +32,13 @@
#define TSC_MGMT_VNODE 999
#ifdef CLUSTER
SIpStrList tscMgmtIpList;
int tsMasterIndex = 0;
int tsSlaveIndex = 1;
SIpStrList tscMgmtIpList;
int tsMasterIndex = 0;
int tsSlaveIndex = 1;
#else
int tsMasterIndex = 0;
int tsSlaveIndex = 0; // slave == master for single node edition
uint32_t tsServerIp;
int tsMasterIndex = 0;
int tsSlaveIndex = 0; // slave == master for single node edition
uint32_t tsServerIp;
#endif
int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo) = {0};
......@@ -130,6 +130,11 @@ void tscProcessActivityTimer(void *handle, void *tmrId) {
pSql->fp = tscProcessHeartBeatRsp;
pSql->cmd.command = TSDB_SQL_HB;
SQueryInfo *pQueryInfo = NULL;
tscGetQueryInfoDetailSafely(&pSql->cmd, 0, &pQueryInfo);
pQueryInfo->command = TSDB_SQL_HB;
......@@ -145,6 +150,8 @@ void tscProcessActivityTimer(void *handle, void *tmrId) {
pObj->pHb = pSql;
tscAddSubqueryInfo(&pObj->pHb->cmd);
tscTrace("%p pHb is allocated, pObj:%p", pObj->pHb, pObj);
}
......@@ -732,6 +739,13 @@ int32_t tscLaunchJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSubquerySu
pNewQueryInfo->exprsInfo.numOfExprs, pNewQueryInfo->colList.numOfCols,
pNewQueryInfo->fieldsInfo.numOfOutputCols, pNewQueryInfo->pMeterInfo[0]->name);
tscPrintSelectClause(pNew, 0);
tscTrace("%p subquery:%p tableIndex:%d, vnodeIdx:%d, type:%d, transfer to ts_comp query to retrieve timestamps, "
"exprInfo:%d, colList:%d, fieldsInfo:%d, name:%s",
pSql, pNew, tableIndex, pMeterMetaInfo->vnodeIndex, pNewQueryInfo->type,
pNewQueryInfo->exprsInfo.numOfExprs, pNewQueryInfo->colList.numOfCols,
pNewQueryInfo->fieldsInfo.numOfOutputCols, pNewQueryInfo->pMeterInfo[0]->name);
tscPrintSelectClause(pNew, 0);
} else {
SQueryInfo *pNewQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0);
pNewQueryInfo->type |= TSDB_QUERY_TYPE_SUBQUERY;
......@@ -1141,8 +1155,10 @@ static void tscHandleSubRetrievalError(SRetrieveSupport *trsupport, SSqlObj *pSq
}
}
int32_t numOfTotal = pState->numOfTotal;
int32_t finished = atomic_add_fetch_32(&pState->numOfCompleted, 1);
if (finished < pState->numOfTotal) {
if (finished < numOfTotal) {
tscTrace("%p sub:%p orderOfSub:%d freed, finished subqueries:%d", pPObj, pSql, trsupport->subqueryIndex, finished);
return tscFreeSubSqlObj(trsupport, pSql);
}
......@@ -1277,8 +1293,13 @@ void tscRetrieveFromVnodeCallBack(void *param, TAOS_RES *tres, int numOfRows) {
return tscAbortFurtherRetryRetrieval(trsupport, tres, TSDB_CODE_CLI_NO_DISKSPACE);
}
// keep this value local variable, since the pState variable may be released by other threads, if atomic_add opertion
// increases the finished value up to pState->numOfTotal value, which means all subqueries are completed.
// In this case, the comparsion between finished value and released pState->numOfTotal is not safe.
int32_t numOfTotal = pState->numOfTotal;
int32_t finished = atomic_add_fetch_32(&pState->numOfCompleted, 1);
if (finished < pState->numOfTotal) {
if (finished < numOfTotal) {
tscTrace("%p sub:%p orderOfSub:%d freed, finished subqueries:%d", pPObj, pSql, trsupport->subqueryIndex, finished);
return tscFreeSubSqlObj(trsupport, pSql);
}
......@@ -1287,7 +1308,7 @@ void tscRetrieveFromVnodeCallBack(void *param, TAOS_RES *tres, int numOfRows) {
pDesc->pSchema->maxCapacity = trsupport->pExtMemBuffer[idx]->numOfElemsPerPage;
tscTrace("%p retrieve from %d vnodes completed.final NumOfRows:%d,start to build loser tree", pPObj,
pState->numOfTotal, pState->numOfCompleted);
pState->numOfTotal, pState->numOfRetrievedRows);
SQueryInfo *pPQueryInfo = tscGetQueryInfoDetail(&pPObj->cmd, 0);
tscClearInterpInfo(pPQueryInfo);
......@@ -1439,7 +1460,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
SSqlObj *pNew = tscCreateSqlObjForSubquery(pParentSql, trsupport, pSql);
if (pNew == NULL) {
tscError("%p sub:%p failed to create new subquery due to out of memory, abort retry, vid:%d, orderOfSub:%d",
pParentSql, pSql, pSvd->vnode, trsupport->subqueryIndex);
trsupport->pParentSqlObj, pSql, pSvd != NULL ? pSvd->vnode : -1, trsupport->subqueryIndex);
pState->code = -TSDB_CODE_CLI_OUT_OF_MEMORY;
trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY;
......@@ -1464,9 +1485,14 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
tscRetrieveFromVnodeCallBack(param, tres, pState->code);
} else { // success, proceed to retrieve data from dnode
tscTrace("%p sub:%p query complete,ip:%u,vid:%d,orderOfSub:%d,retrieve data", pParentSql, pSql,
if (vnodeInfo != NULL) {
tscTrace("%p sub:%p query complete,ip:%u,vid:%d,orderOfSub:%d,retrieve data", trsupport->pParentSqlObj, pSql,
vnodeInfo->vpeerDesc[vnodeInfo->index].ip, vnodeInfo->vpeerDesc[vnodeInfo->index].vnode,
trsupport->subqueryIndex);
} else {
tscTrace("%p sub:%p query complete, orderOfSub:%d,retrieve data", trsupport->pParentSqlObj, pSql,
trsupport->subqueryIndex);
}
taos_fetch_rows_a(tres, tscRetrieveFromVnodeCallBack, param);
}
......
......@@ -207,8 +207,8 @@ int taos_query_imp(STscObj *pObj, SSqlObj *pSql) {
taosCleanUpHashTable(pSql->pTableHashList);
pSql->pTableHashList = NULL;
}
tscTrace("%p SQL: %s pObj:%p", pSql, pSql->sqlstr, pObj);
tscDump("%p pObj:%p, SQL: %s", pSql, pObj, pSql->sqlstr);
pRes->code = (uint8_t)tsParseSql(pSql, false);
......
......@@ -281,11 +281,11 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer)
tscTrace("%p stream:%p, etime:%" PRId64 " is too old, exceeds the max retention time window:%" PRId64 ", stop the stream",
pStream->pSql, pStream, pStream->stime, pStream->etime);
// TODO : How to terminate stream here
taos_close_stream(pStream);
if (pStream->callback) {
// Callback function from upper level
pStream->callback(pStream->param);
}
taos_close_stream(pStream);
return;
}
......@@ -339,11 +339,11 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) {
tscTrace("%p stream:%p, stime:%" PRId64 " is larger than end time: %" PRId64 ", stop the stream", pStream->pSql, pStream,
pStream->stime, pStream->etime);
// TODO : How to terminate stream here
taos_close_stream(pStream);
if (pStream->callback) {
// Callback function from upper level
pStream->callback(pStream->param);
}
taos_close_stream(pStream);
return;
}
} else {
......@@ -352,11 +352,11 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) {
tscTrace("%p stream:%p, stime:%ld is larger than end time: %ld, stop the stream", pStream->pSql, pStream,
pStream->stime, pStream->etime);
// TODO : How to terminate stream here
taos_close_stream(pStream);
if (pStream->callback) {
// Callback function from upper level
pStream->callback(pStream->param);
}
taos_close_stream(pStream);
return;
}
......
......@@ -701,8 +701,10 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SDataBlockList* pTableDataBlockLi
SShellSubmitBlock* pBlocks = (SShellSubmitBlock*)pOneTableBlock->pData;
sortRemoveDuplicates(pOneTableBlock);
tscTrace("%p meterId:%s, sid:%d, rows:%d, sversion:%d", pSql, pOneTableBlock->meterId, pBlocks->sid,
pBlocks->numOfRows, pBlocks->sversion);
char* e = (char*)pBlocks->payLoad + pOneTableBlock->rowSize*(pBlocks->numOfRows-1);
tscTrace("%p meterId:%s, sid:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pSql, pOneTableBlock->meterId, pBlocks->sid,
pBlocks->numOfRows, pBlocks->sversion, GET_INT64_VAL(pBlocks->payLoad), GET_INT64_VAL(e));
pBlocks->sid = htonl(pBlocks->sid);
pBlocks->uid = htobe64(pBlocks->uid);
......
{
"name": "TDengine",
"id": "tdengine",
"id": "taosdata-tdengine-datasource",
"type": "datasource",
"partials": {
......@@ -24,8 +24,8 @@
{"name": "GitHub", "url": "https://github.com/taosdata/TDengine/tree/develop/src/connector/grafana/tdengine"},
{"name": "AGPL 3.0", "url": "https://github.com/taosdata/TDengine/tree/develop/src/connector/grafana/tdengine/LICENSE"}
],
"version": "1.6.0",
"updated": "2019-11-12"
"version": "1.0.0",
"updated": "2020-01-13"
},
"dependencies": {
......
{
"name": "TDengine",
"private": true,
"private": false,
"version": "1.0.0",
"description": "grafana datasource plugin for tdengine",
"main": "index.js",
"scripts": {
"build": "./node_modules/grunt-cli/bin/grunt",
"test": "./node_modules/grunt-cli/bin/grunt mochaTest"
......@@ -12,7 +11,7 @@
"type": "git",
"url": "git+https://github.com/taosdata/TDengine.git"
},
"author": "",
"author": "https://www.taosdata.com",
"license": "AGPL 3.0",
"bugs": {
"url": "https://github.com/taosdata/TDengine/issues"
......
{
"name": "TDengine",
"id": "tdengine",
"id": "taosdata-tdengine-datasource",
"type": "datasource",
"partials": {
......@@ -24,8 +24,8 @@
{"name": "GitHub", "url": "https://github.com/taosdata/TDengine/tree/develop/src/connector/grafana/tdengine"},
{"name": "AGPL 3.0", "url": "https://github.com/taosdata/TDengine/tree/develop/src/connector/grafana/tdengine/LICENSE"}
],
"version": "1.6.0",
"updated": "2019-11-12"
"version": "1.0.0",
"updated": "2020-01-13"
},
"dependencies": {
......
......@@ -113,7 +113,10 @@ extern uint32_t cdebugFlag;
}
#define tscPrint(...) \
{ tprintf("TSC ", 255, __VA_ARGS__); }
#define tscDump(...) \
if (cdebugFlag & DEBUG_TRACE) { \
taosPrintLongString("TSC ", cdebugFlag, __VA_ARGS__); \
}
#define jniError(...) \
if (jnidebugFlag & DEBUG_ERROR) { \
tprintf("ERROR JNI ", jnidebugFlag, __VA_ARGS__); \
......
......@@ -517,7 +517,8 @@ bool taosGetProcIO(float *readKB, float *writeKB) {
static int64_t lastReadbyte = -1;
static int64_t lastWritebyte = -1;
int64_t curReadbyte, curWritebyte;
int64_t curReadbyte = 0;
int64_t curWritebyte = 0;
if (!taosReadProcIO(&curReadbyte, &curWritebyte)) {
return false;
......
......@@ -703,7 +703,7 @@ int taosSendPacketViaTcp(uint32_t ip, uint16_t port, char *data, int dataLen, vo
pHead->msgLen = (int32_t)htonl(msgLen);
code = taosSendUdpData(ip, port, buffer, msgLen, chandle);
pHead = (STaosHeader *)data;
//pHead = (STaosHeader *)data;
tinet_ntoa(ipstr, ip);
int fd = taosOpenTcpClientSocket(ipstr, pConn->port, tsLocalIp);
......
......@@ -1383,7 +1383,7 @@ int vnodeSearchPointInFile(SMeterObj *pObj, SQuery *pQuery) {
firstSlot = 0;
lastSlot = pQuery->numOfBlocks - 1;
numOfBlocks = pQuery->numOfBlocks;
//numOfBlocks = pQuery->numOfBlocks;
if (QUERY_IS_ASC_QUERY(pQuery) && pBlock[lastSlot].keyLast < pQuery->skey) continue;
if (!QUERY_IS_ASC_QUERY(pQuery) && pBlock[firstSlot].keyFirst > pQuery->skey) continue;
......
......@@ -98,7 +98,7 @@ unsigned char *base64_decode(const char *value, int inlen, int *outlen) {
base64_decode_error:
free(result);
*result = 0;
result = 0;
*outlen = 0;
return result;
......
......@@ -516,20 +516,20 @@ tMemBucket* tMemBucketCreate(int32_t totalSlots, int32_t nBufferSize, int16_t nE
if (pDesc->pSchema->numOfCols != 1 || pDesc->pSchema->colOffset[0] != 0) {
pError("MemBucket:%p,only consecutive data is allowed,invalid numOfCols:%d or offset:%d",
*pBucket, pDesc->pSchema->numOfCols, pDesc->pSchema->colOffset[0]);
pBucket, pDesc->pSchema->numOfCols, pDesc->pSchema->colOffset[0]);
tfree(pBucket);
return NULL;
}
if (pDesc->pSchema->pFields[0].type != dataType) {
pError("MemBucket:%p,data type is not consistent,%d in schema, %d in param", *pBucket,
pError("MemBucket:%p,data type is not consistent,%d in schema, %d in param", pBucket,
pDesc->pSchema->pFields[0].type, dataType);
tfree(pBucket);
return NULL;
}
if (pBucket->numOfTotalPages < pBucket->nTotalSlots) {
pWarn("MemBucket:%p,total buffer pages %d are not enough for all slots", *pBucket, pBucket->numOfTotalPages);
pWarn("MemBucket:%p,total buffer pages %d are not enough for all slots", pBucket, pBucket->numOfTotalPages);
}
pBucket->pSegs = (tMemBucketSegment *)malloc(pBucket->numOfSegs * sizeof(tMemBucketSegment));
......@@ -540,7 +540,7 @@ tMemBucket* tMemBucketCreate(int32_t totalSlots, int32_t nBufferSize, int16_t nE
pBucket->pSegs[i].pBoundingEntries = NULL;
}
pTrace("MemBucket:%p,created,buffer size:%d,elem size:%d", *pBucket, pBucket->numOfTotalPages * DEFAULT_PAGE_SIZE,
pTrace("MemBucket:%p,created,buffer size:%d,elem size:%d", pBucket, pBucket->numOfTotalPages * DEFAULT_PAGE_SIZE,
pBucket->nElemSize);
return pBucket;
......@@ -1258,6 +1258,7 @@ static tFilePage *loadIntoBucketFromDisk(tMemBucket *pMemBucket, int32_t segIdx,
for (uint32_t j = 0; j < pFlushInfo->numOfPages; ++j) {
ret = fread(pPage, pMemBuffer->nPageSize, 1, pMemBuffer->dataFile);
UNUSED(ret);
assert(pPage->numOfElems > 0);
tColModelAppend(pDesc->pSchema, buffer, pPage->data, 0, pPage->numOfElems, pPage->numOfElems);
......@@ -1917,6 +1918,7 @@ double getPercentileImpl(tMemBucket *pMemBucket, int32_t count, double fraction)
for (uint32_t jx = 0; jx < pFlushInfo->numOfPages; ++jx) {
ret = fread(pPage, pMemBuffer->nPageSize, 1, pMemBuffer->dataFile);
UNUSED(ret);
tMemBucketPut(pMemBucket, pPage->data, pPage->numOfElems);
}
......
......@@ -292,8 +292,8 @@ int32_t parseLocaltime(char* timestr, int64_t* time, int32_t timePrec) {
}
/* mktime will be affected by TZ, set by using taos_options */
int64_t seconds = mktime(&tm);
//int64_t seconds = (int64_t)user_mktime(&tm);
//int64_t seconds = mktime(&tm);
int64_t seconds = (int64_t)user_mktime(&tm);
int64_t fraction = 0;
......
......@@ -725,7 +725,7 @@ int32_t tVariantDump(tVariant *pVariant, char *payload, char type) {
*((int64_t *)payload) = TSDB_DATA_DOUBLE_NULL;
return 0;
} else {
double value;
double value = 0;
int32_t ret;
ret = convertToDouble(pVariant->pz, pVariant->nLen, &value);
if ((errno == ERANGE && value == -1) || (ret != 0)) {
......