diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh
index 63009e5421ec9db8e787980846c00b14beaab75a..2a078b5eabd6129ae7bf6bc4302e888341329ea4 100755
--- a/packaging/tools/install.sh
+++ b/packaging/tools/install.sh
@@ -481,11 +481,11 @@ function install_adapter_config() {
     ${csudo}mkdir -p ${cfg_install_dir}
     [ -f ${script_dir}/cfg/${adapterName}.toml ] && ${csudo}cp ${script_dir}/cfg/${adapterName}.toml ${cfg_install_dir}
     [ -f ${cfg_install_dir}/${adapterName}.toml ] && ${csudo}chmod 644 ${cfg_install_dir}/${adapterName}.toml
+  else
+    [ -f ${script_dir}/cfg/${adapterName}.toml ] &&
+      ${csudo}cp -f ${script_dir}/cfg/${adapterName}.toml ${cfg_install_dir}/${adapterName}.toml.new
   fi
-  [ -f ${script_dir}/cfg/${adapterName}.toml ] &&
-    ${csudo}cp -f ${script_dir}/cfg/${adapterName}.toml ${cfg_install_dir}/${adapterName}.toml.new
-
   [ -f ${cfg_install_dir}/${adapterName}.toml ] &&
     ${csudo}ln -s ${cfg_install_dir}/${adapterName}.toml ${install_main_dir}/cfg/${adapterName}.toml
 
@@ -499,9 +499,10 @@ function install_config() {
     ${csudo}mkdir -p ${cfg_install_dir}
     [ -f ${script_dir}/cfg/${configFile} ] && ${csudo}cp ${script_dir}/cfg/${configFile} ${cfg_install_dir}
     ${csudo}chmod 644 ${cfg_install_dir}/*
+  else
+    ${csudo}cp -f ${script_dir}/cfg/${configFile} ${cfg_install_dir}/${configFile}.new
   fi
-  ${csudo}cp -f ${script_dir}/cfg/${configFile} ${cfg_install_dir}/${configFile}.new
 
   ${csudo}ln -s ${cfg_install_dir}/${configFile} ${install_main_dir}/cfg
 
   [ ! -z $1 ] && return 0 || : # only install client
diff --git a/source/common/src/systable.c b/source/common/src/systable.c
index 60a673ef9cf25b61e8889f4941802a8500b726a2..6c86743b696e23ddcdc403860ecafaeb2caa6448 100644
--- a/source/common/src/systable.c
+++ b/source/common/src/systable.c
@@ -67,6 +67,8 @@ static const SSysDbTableSchema clusterSchema[] = {
     {.name = "name", .bytes = TSDB_CLUSTER_ID_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
     {.name = "uptime", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
     {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = true},
+    {.name = "version", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+    {.name = "expire_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = true},
 };
 
 static const SSysDbTableSchema userDBSchema[] = {
diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmFile.c b/source/dnode/mgmt/mgmt_mnode/src/mmFile.c
index dd05fe673a4f3d18843a6f07513d803daf7adaa5..f06669a610142a38b6b2937c95bd150c324df568 100644
--- a/source/dnode/mgmt/mgmt_mnode/src/mmFile.c
+++ b/source/dnode/mgmt/mgmt_mnode/src/mmFile.c
@@ -17,117 +17,97 @@
 #include "mmInt.h"
 #include "tjson.h"
 
+static int32_t mmDecodeOption(SJson *pJson, SMnodeOpt *pOption) {
+  int32_t code = 0;
+
+  tjsonGetInt32ValueFromDouble(pJson, "deployed", pOption->deploy, code);
+  if (code < 0) return -1;
+  tjsonGetInt32ValueFromDouble(pJson, "selfIndex", pOption->selfIndex, code);
+  if (code < 0) return 0;
+
+  SJson *replicas = tjsonGetObjectItem(pJson, "replicas");
+  if (replicas == NULL) return 0;
+  pOption->numOfReplicas = tjsonGetArraySize(replicas);
+
+  for (int32_t i = 0; i < pOption->numOfReplicas; ++i) {
+    SJson *replica = tjsonGetArrayItem(replicas, i);
+    if (replica == NULL) return -1;
+
+    SReplica *pReplica = pOption->replicas + i;
+    tjsonGetInt32ValueFromDouble(replica, "id", pReplica->id, code);
+    if (code < 0) return -1;
+    code = tjsonGetStringValue(replica, "fqdn", pReplica->fqdn);
+    if (code < 0) return -1;
+    tjsonGetUInt16ValueFromDouble(replica, "port", pReplica->port, code);
+    if (code < 0) return -1;
+  }
+
+  return 0;
+}
+
 int32_t mmReadFile(const char *path, SMnodeOpt *pOption) {
-  int32_t   code = TSDB_CODE_INVALID_JSON_FORMAT;
-  int32_t   len = 0;
-  int32_t   maxLen = 4096;
-  char     *content = taosMemoryCalloc(1, maxLen + 1);
-  cJSON    *root = NULL;
-  char      file[PATH_MAX] = {0};
+  int32_t   code = -1;
   TdFilePtr pFile = NULL;
-
+  char     *pData = NULL;
+  SJson    *pJson = NULL;
+  char      file[PATH_MAX] = {0};
   snprintf(file, sizeof(file), "%s%smnode.json", path, TD_DIRSEP);
+
+  if (taosStatFile(file, NULL, NULL) < 0) {
+    dInfo("mnode file:%s not exist", file);
+    return 0;
+  }
+
   pFile = taosOpenFile(file, TD_FILE_READ);
   if (pFile == NULL) {
-    code = 0;
+    terrno = TAOS_SYSTEM_ERROR(errno);
+    dError("failed to open mnode file:%s since %s", file, terrstr());
     goto _OVER;
   }
 
-  len = (int32_t)taosReadFile(pFile, content, maxLen);
-  if (len <= 0) {
-    dError("failed to read %s since content is null", file);
+  int64_t size = 0;
+  if (taosFStatFile(pFile, &size, NULL) < 0) {
+    terrno = TAOS_SYSTEM_ERROR(errno);
+    dError("failed to fstat mnode file:%s since %s", file, terrstr());
     goto _OVER;
   }
 
-  content[len] = 0;
-  root = cJSON_Parse(content);
-  if (root == NULL) {
-    dError("failed to read %s since invalid json format", file);
+  pData = taosMemoryMalloc(size + 1);
+  if (pData == NULL) {
+    terrno = TSDB_CODE_OUT_OF_MEMORY;
     goto _OVER;
   }
 
-  cJSON *deployed = cJSON_GetObjectItem(root, "deployed");
-  if (!deployed || deployed->type != cJSON_Number) {
-    dError("failed to read %s since deployed not found", file);
+  if (taosReadFile(pFile, pData, size) != size) {
+    terrno = TAOS_SYSTEM_ERROR(errno);
+    dError("failed to read mnode file:%s since %s", file, terrstr());
     goto _OVER;
   }
-  pOption->deploy = deployed->valueint;
 
-  cJSON *selfIndex = cJSON_GetObjectItem(root, "selfIndex");
-  if (selfIndex) {
-    if (selfIndex->type != cJSON_Number) {
-      dError("failed to read %s since selfIndex not found", file);
-      goto _OVER;
-    }
-    pOption->selfIndex = selfIndex->valueint;
-  }
+  pData[size] = '\0';
 
-  cJSON *replicas = cJSON_GetObjectItem(root, "replicas");
-  if (replicas) {
-    if (replicas->type != cJSON_Array) {
-      dError("failed to read %s since replicas not found", file);
-      goto _OVER;
-    }
+  pJson = tjsonParse(pData);
+  if (pJson == NULL) {
+    terrno = TSDB_CODE_INVALID_JSON_FORMAT;
+    goto _OVER;
+  }
 
-    int32_t numOfReplicas = cJSON_GetArraySize(replicas);
-    if (numOfReplicas <= 0) {
-      dError("failed to read %s since numOfReplicas:%d invalid", file, numOfReplicas);
-      goto _OVER;
-    }
-    pOption->numOfReplicas = numOfReplicas;
-
-    for (int32_t i = 0; i < numOfReplicas; ++i) {
-      SReplica *pReplica = pOption->replicas + i;
-
-      cJSON *replica = cJSON_GetArrayItem(replicas, i);
-      if (replica == NULL) break;
-
-      cJSON *id = cJSON_GetObjectItem(replica, "id");
-      if (id) {
-        if (id->type != cJSON_Number) {
-          dError("failed to read %s since id not found", file);
-          goto _OVER;
-        }
-        if (pReplica) {
-          pReplica->id = id->valueint;
-        }
-      }
-
-      cJSON *fqdn = cJSON_GetObjectItem(replica, "fqdn");
-      if (fqdn) {
-        if (fqdn->type != cJSON_String || fqdn->valuestring == NULL) {
-          dError("failed to read %s since fqdn not found", file);
-          goto _OVER;
-        }
-        if (pReplica) {
-          tstrncpy(pReplica->fqdn, fqdn->valuestring, TSDB_FQDN_LEN);
-        }
-      }
-
-      cJSON *port = cJSON_GetObjectItem(replica, "port");
-      if (port) {
-        if (port->type != cJSON_Number) {
-          dError("failed to read %s since port not found", file);
-          goto _OVER;
-        }
-        if (pReplica) {
-          pReplica->port = (uint16_t)port->valueint;
-        }
-      }
-    }
+  if (mmDecodeOption(pJson, pOption) < 0) {
+    terrno = TSDB_CODE_INVALID_JSON_FORMAT;
+    goto _OVER;
   }
 
   code = 0;
+  dInfo("succeed to read mnode file %s", file);
 
 _OVER:
-  if (content != NULL) taosMemoryFree(content);
-  if (root != NULL) cJSON_Delete(root);
+  if (pData != NULL) taosMemoryFree(pData);
+  if (pJson != NULL) cJSON_Delete(pJson);
   if (pFile != NULL) taosCloseFile(&pFile);
 
-  if (code == 0) {
-    dDebug("succcessed to read file %s, deployed:%d", file, pOption->deploy);
-  }
-  terrno = code;
+  if (code != 0) {
+    dError("failed to read mnode file:%s since %s", file, terrstr());
+  }
   return code;
 }
diff --git a/source/dnode/mnode/impl/src/mndCluster.c b/source/dnode/mnode/impl/src/mndCluster.c
index ca03207d2b95bab562c48a02ca383d67bba7349d..e0d8ecb3eb949d4161f5330d62250b3267ed513b 100644
--- a/source/dnode/mnode/impl/src/mndCluster.c
+++ b/source/dnode/mnode/impl/src/mndCluster.c
@@ -20,6 +20,8 @@
 #define CLUSTER_VER_NUMBE    1
 #define CLUSTER_RESERVE_SIZE 60
+char    tsVersionName[16] = "community";
+int64_t tsExpireTime = 0;
 
 static SSdbRaw *mndClusterActionEncode(SClusterObj *pCluster);
 static SSdbRow *mndClusterActionDecode(SSdbRaw *pRaw);
@@ -291,6 +293,18 @@ static int32_t mndRetrieveClusters(SRpcMsg *pMsg, SShowObj *pShow, SSDataBlock *
     pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
     colDataAppend(pColInfo, numOfRows, (const char *)&pCluster->createdTime, false);
 
+    char ver[12] = {0};
+    STR_WITH_MAXSIZE_TO_VARSTR(ver, tsVersionName, pShow->pMeta->pSchemas[cols].bytes);
+    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+    colDataAppend(pColInfo, numOfRows, (const char *)ver, false);
+
+    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+    if (tsExpireTime <= 0) {
+      colDataAppendNULL(pColInfo, numOfRows);
+    } else {
+      colDataAppend(pColInfo, numOfRows, (const char *)&tsExpireTime, false);
+    }
+
     sdbRelease(pSdb, pCluster);
     numOfRows++;
   }
diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c
index dcfc78fd1a530a2d5eb5b6085170aa5363a21cc2..91690af4c8e6a98dc75ae1839d171def11ed478e 100644
--- a/source/dnode/vnode/src/tsdb/tsdbRead.c
+++ b/source/dnode/vnode/src/tsdb/tsdbRead.c
@@ -2834,7 +2834,37 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) {
   TSDBKEY keyInBuf = getCurrentKeyInBuf(pScanInfo, pReader);
 
   if (pBlockInfo == NULL) {  // build data block from last data file
-    code = buildComposedDataBlock(pReader);
+    SBlockData* pBData = &pReader->status.fileBlockData;
+    tBlockDataReset(pBData);
+
+    SSDataBlock* pResBlock = pReader->pResBlock;
+    tsdbDebug("load data in last block firstly, due to desc scan data, %s", pReader->idStr);
+
+    int64_t st = taosGetTimestampUs();
+
+    while (1) {
+      bool hasBlockLData = hasDataInLastBlock(pLastBlockReader);
+
+      // no data in last block and block, no need to proceed.
+      if (hasBlockLData == false) {
+        break;
+      }
+
+      buildComposedDataBlockImpl(pReader, pScanInfo, &pReader->status.fileBlockData, pLastBlockReader);
+      if (pResBlock->info.rows >= pReader->capacity) {
+        break;
+      }
+    }
+
+    double el = (taosGetTimestampUs() - st) / 1000.0;
+    updateComposedBlockInfo(pReader, el, pScanInfo);
+
+    if (pResBlock->info.rows > 0) {
+      tsdbDebug("%p uid:%" PRIu64 ", composed data block created, brange:%" PRIu64 "-%" PRIu64
+                " rows:%d, elapsed time:%.2f ms %s",
+                pReader, pResBlock->info.id.uid, pResBlock->info.window.skey, pResBlock->info.window.ekey,
+                pResBlock->info.rows, el, pReader->idStr);
+    }
   } else if (fileBlockShouldLoad(pReader, pBlockInfo, pBlock, pScanInfo, keyInBuf, pLastBlockReader)) {
     code = doLoadFileBlockData(pReader, pBlockIter, &pStatus->fileBlockData, pScanInfo->uid);
     if (code != TSDB_CODE_SUCCESS) {
@@ -2853,10 +2883,38 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) {
       // only return the rows in last block
       int64_t tsLast = getCurrentKeyInLastBlock(pLastBlockReader);
       ASSERT(tsLast >= pBlock->maxKey.ts);
-      tBlockDataReset(&pReader->status.fileBlockData);
+      SBlockData* pBData = &pReader->status.fileBlockData;
+      tBlockDataReset(pBData);
+
+      SSDataBlock* pResBlock = pReader->pResBlock;
       tsdbDebug("load data in last block firstly, due to desc scan data, %s", pReader->idStr);
-
-      code = buildComposedDataBlock(pReader);
+
+      int64_t st = taosGetTimestampUs();
+
+      while (1) {
+        bool hasBlockLData = hasDataInLastBlock(pLastBlockReader);
+
+        // no data in last block and block, no need to proceed.
+        if (hasBlockLData == false) {
+          break;
+        }
+
+        buildComposedDataBlockImpl(pReader, pScanInfo, &pReader->status.fileBlockData, pLastBlockReader);
+        if (pResBlock->info.rows >= pReader->capacity) {
+          break;
+        }
+      }
+
+      double el = (taosGetTimestampUs() - st) / 1000.0;
+      updateComposedBlockInfo(pReader, el, pScanInfo);
+
+      if (pResBlock->info.rows > 0) {
+        tsdbDebug("%p uid:%" PRIu64 ", composed data block created, brange:%" PRIu64 "-%" PRIu64
+                  " rows:%d, elapsed time:%.2f ms %s",
+                  pReader, pResBlock->info.id.uid, pResBlock->info.window.skey, pResBlock->info.window.ekey,
+                  pResBlock->info.rows, el, pReader->idStr);
+      }
     } else {  // whole block is required, return it directly
       SDataBlockInfo* pInfo = &pReader->pResBlock->info;
       pInfo->rows = pBlock->nRow;
diff --git a/tests/script/tsim/parser/regressiontest.sim b/tests/script/tsim/parser/regressiontest.sim
index 98cb0248a1959677adf3a544f205fb77498e9c0c..1b127155cbed8f9eaf26898c27bd87e46ed1d7e4 100644
--- a/tests/script/tsim/parser/regressiontest.sim
+++ b/tests/script/tsim/parser/regressiontest.sim
@@ -58,4 +58,9 @@ if $data40 != @18-09-17 09:06:49.600@ then
   return -1
 endi
 
+sql select * from $tb order by ts desc;
+if $rows != 8198 then
+  return -1
+endi
+
 system sh/exec.sh -n dnode1 -s stop -x SIGINT