Commit 325bb79a authored by dengyihao

resolve conflict

......@@ -4,7 +4,7 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "2.0.6.0")
SET(TD_VER_NUMBER "2.0.7.0")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
......
......@@ -48,7 +48,7 @@ cp ${compile_dir}/../packaging/deb/taosd ${pkg_dir}${install_home_pat
cp ${compile_dir}/../packaging/tools/post.sh ${pkg_dir}${install_home_path}/script
cp ${compile_dir}/../packaging/tools/preun.sh ${pkg_dir}${install_home_path}/script
cp ${compile_dir}/build/bin/taosdemo ${pkg_dir}${install_home_path}/bin
#cp ${compile_dir}/build/bin/taosdump ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taosdump ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_path}/driver
......
......@@ -58,7 +58,7 @@ cp %{_compiledir}/../packaging/tools/preun.sh %{buildroot}%{homepath}/scri
cp %{_compiledir}/build/bin/taos %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taosd %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taosdemo %{buildroot}%{homepath}/bin
#cp %{_compiledir}/build/bin/taosdump %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taosdump %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver
cp %{_compiledir}/../src/inc/taos.h %{buildroot}%{homepath}/include
cp %{_compiledir}/../src/inc/taoserror.h %{buildroot}%{homepath}/include
......
#!/bin/bash
#
log_dir=$1
result_file=$2
if [ ! -n "$1" ];then
echo "Pleas input the director of taosdlog."
echo "usage: ./get_client.sh <taosdlog directory> <result file>"
exit 1
else
log_dir=$1
fi
if [ ! -n "$2" ];then
result_file=clientInfo.txt
else
result_file=$2
fi
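# Collect the distinct client addresses that opened TCP connections to taosd:
# strip the connection handle, the trailing fields after the comma and the port,
# then de-duplicate on the remaining address column.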
grep "new TCP connection" ${log_dir}/taosdlog.* | sed -e "s/0x.* from / /"|sed -e "s/,.*$//"|sed -e "s/:[0-9]*$//"|sort -r|uniq -f 2|sort -k 3 -r|uniq -f 2 > ${result_file}
......@@ -45,8 +45,7 @@ if [ "$osType" != "Darwin" ]; then
strip ${build_dir}/bin/taos
bin_files="${build_dir}/bin/taos ${script_dir}/remove_client.sh"
else
#bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${script_dir}/remove_client.sh ${script_dir}/set_core.sh"
bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdemo ${script_dir}/remove_client.sh ${script_dir}/set_core.sh"
bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${script_dir}/remove_client.sh ${script_dir}/set_core.sh ${script_dir}/get_client.sh"
fi
lib_files="${build_dir}/lib/libtaos.so.${version}"
else
......
......@@ -76,8 +76,10 @@ if [ "$osType" != "Darwin" ]; then
else
cp ${build_dir}/bin/taos ${install_dir}/bin/power
cp ${script_dir}/remove_power.sh ${install_dir}/bin
cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo
cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo
cp ${build_dir}/bin/taosdump ${install_dir}/bin/powerdump
cp ${script_dir}/set_core.sh ${install_dir}/bin
cp ${script_dir}/get_client.sh ${install_dir}/bin
fi
else
cp ${bin_files} ${install_dir}/bin
......
......@@ -36,8 +36,7 @@ if [ "$pagMode" == "lite" ]; then
strip ${build_dir}/bin/taos
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh"
else
#bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove.sh ${script_dir}/set_core.sh"
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove.sh ${script_dir}/set_core.sh"
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove.sh ${script_dir}/set_core.sh ${script_dir}/get_client.sh"
fi
lib_files="${build_dir}/lib/libtaos.so.${version}"
......
......@@ -77,8 +77,10 @@ else
cp ${build_dir}/bin/taosd ${install_dir}/bin/powerd
cp ${script_dir}/remove_power.sh ${install_dir}/bin
cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo
cp ${build_dir}/bin/taosdump ${install_dir}/bin/powerdump
cp ${build_dir}/bin/tarbitrator ${install_dir}/bin
cp ${script_dir}/set_core.sh ${install_dir}/bin
cp ${script_dir}/get_client.sh ${install_dir}/bin
fi
chmod a+x ${install_dir}/bin/* || :
......
name: tdengine
base: core18
version: '2.0.6.0'
version: '2.0.7.0'
icon: snap/gui/t-dengine.svg
summary: an open-source big data platform designed and optimized for IoT.
description: |
......
......@@ -46,7 +46,7 @@ extern "C" {
typedef struct {
void *appH;
void *cqH;
int (*notifyStatus)(void *, int status);
int (*notifyStatus)(void *, int status, int eno);
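// eno carries the result code of the commit that just finished, so the upper layer
// (e.g. the vnode) can tell a failed commit apart from a successful one.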
int (*eventCallBack)(void *);
void *(*cqCreateFunc)(void *handle, uint64_t uid, int sid, char *sqlStr, STSchema *pSchema);
void (*cqDropFunc)(void *handle);
......@@ -83,7 +83,7 @@ STsdbCfg *tsdbGetCfg(const TSDB_REPO_T *repo);
int tsdbCreateRepo(char *rootDir, STsdbCfg *pCfg);
int32_t tsdbDropRepo(char *rootDir);
TSDB_REPO_T *tsdbOpenRepo(char *rootDir, STsdbAppH *pAppH);
void tsdbCloseRepo(TSDB_REPO_T *repo, int toCommit);
int tsdbCloseRepo(TSDB_REPO_T *repo, int toCommit);
int32_t tsdbConfigRepo(TSDB_REPO_T *repo, STsdbCfg *pCfg);
int tsdbGetState(TSDB_REPO_T *repo);
......
......@@ -3,3 +3,4 @@ PROJECT(TDengine)
ADD_SUBDIRECTORY(shell)
ADD_SUBDIRECTORY(taosdemo)
ADD_SUBDIRECTORY(taosdump)
......@@ -46,7 +46,7 @@ static struct argp_option options[] = {
{"thread", 'T', "THREADNUM", 0, "Number of threads when using multi-thread to import data."},
{"database", 'd', "DATABASE", 0, "Database to use when connecting to the server."},
{"timezone", 't', "TIMEZONE", 0, "Time zone of the shell, default is local."},
{"netrole", 'n', "NETROLE", 0, "Net role when network connectivity test, default is NULL, valid option: client | server."},
{"netrole", 'n', "NETROLE", 0, "Net role when network connectivity test, default is NULL, options: client|clients|server."},
{"endport", 'e', "ENDPORT", 0, "Net test end port, default is 6042."},
{"pktlen", 'l', "PKTLEN", 0, "Packet length used for net test, default is 1000 bytes."},
{0}};
......
CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
PROJECT(TDengine)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc)
INCLUDE_DIRECTORIES(inc)
AUX_SOURCE_DIRECTORY(. SRC)
IF (TD_LINUX)
ADD_EXECUTABLE(taosdump ${SRC})
IF (TD_SOMODE_STATIC)
TARGET_LINK_LIBRARIES(taosdump taos_static)
ELSE ()
TARGET_LINK_LIBRARIES(taosdump taos)
ENDIF ()
ENDIF ()
This diff has been collapsed.
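# Batch migration helper: dump tables from a TDengine 1.6 deployment with taosdump
# (NTABLES_PER_DUMP tables per round) and import each batch into a 2.0 deployment.
# The absolute paths below are specific to the test environment.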
taos1_6="/root/mnt/work/test/td1.6/build/bin/taos"
taosdump1_6="/root/mnt/work/test/td1.6/build/bin/taosdump"
taoscfg1_6="/root/mnt/work/test/td1.6/test/cfg"
taos2_0="/root/mnt/work/test/td2.0/build/bin/taos"
taosdump2_0="/root/mnt/work/test/td2.0/build/bin/taosdump"
taoscfg2_0="/root/mnt/work/test/td2.0/test/cfg"
data_dir="/root/mnt/work/test/td1.6/output"
table_list="/root/mnt/work/test/td1.6/tables"
DBNAME="test"
NTABLES=$(wc -l ${table_list} | awk '{print $1;}')
NTABLES_PER_DUMP=101
mkdir -p ${data_dir}
i=0
round=0
command="${taosdump1_6} -c ${taoscfg1_6} -o ${data_dir} -N 100 -T 20 ${DBNAME}"
while IFS= read -r line
do
i=$((i+1))
command="${command} ${line}"
if [[ "$i" -eq ${NTABLES_PER_DUMP} ]]; then
round=$((round+1))
echo "Starting round ${round} dump out..."
rm -f ${data_dir}/*
${command}
echo "Starting round ${round} dump in..."
${taosdump2_0} -c ${taoscfg2_0} -i ${data_dir}
# Reset variables
# command="${taosdump1_6} -c ${taoscfg1_6} -o ${data_dir} -N 100 ${DBNAME}"
command="${taosdump1_6} -c ${taoscfg1_6} -o ${data_dir} -N 100 -T 20 ${DBNAME}"
i=0
fi
done < "${table_list}"
if [[ ${i} -ne "0" ]]; then
round=$((round+1))
echo "Starting round ${round} dump out..."
rm -f ${data_dir}/*
${command}
echo "Starting round ${round} dump in..."
${taosdump2_0} -c ${taoscfg2_0} -i ${data_dir}
fi
......@@ -578,6 +578,7 @@ static void syncChooseMaster(SSyncNode *pNode) {
#if 0
for (int32_t i = 0; i < pNode->replica; ++i) {
if (i == index) continue;
pPeer = pNode->peerInfo[i];
if (pPeer->version == nodeVersion) {
pPeer->role = TAOS_SYNC_ROLE_SLAVE;
......
......@@ -182,6 +182,8 @@ static int32_t syncReadOneWalRecord(int32_t sfd, SWalHead *pHead, uint32_t *pEve
return 0;
}
assert(pHead->len <= TSDB_MAX_WAL_SIZE);
ret = read(sfd, pHead->cont, pHead->len);
if (ret < 0) return -1;
......
......@@ -208,6 +208,18 @@ typedef struct {
} SFileGroupIter;
// ------------------ tsdbMain.c
typedef struct {
int32_t totalLen;
int32_t len;
SDataRow row;
} SSubmitBlkIter;
typedef struct {
int32_t totalLen;
int32_t len;
void * pMsg;
} SSubmitMsgIter;
typedef struct {
int8_t state;
......@@ -223,6 +235,7 @@ typedef struct {
sem_t readyToCommit;
pthread_mutex_t mutex;
bool repoLocked;
int32_t code; // Commit code
} STsdbRepo;
// ------------------ tsdbRWHelper.c
......@@ -430,7 +443,6 @@ void tsdbCloseBufPool(STsdbRepo* pRepo);
SListNode* tsdbAllocBufBlockFromPool(STsdbRepo* pRepo);
// ------------------ tsdbMemTable.c
int tsdbUpdateRowInMem(STsdbRepo* pRepo, SDataRow row, STable* pTable);
int tsdbRefMemTable(STsdbRepo* pRepo, SMemTable* pMemTable);
int tsdbUnRefMemTable(STsdbRepo* pRepo, SMemTable* pMemTable);
int tsdbTakeMemSnapshot(STsdbRepo* pRepo, SMemTable** pMem, SMemTable** pIMem);
......
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "tsdbMain.h"
static int tsdbCommitTSData(STsdbRepo *pRepo);
static int tsdbCommitMeta(STsdbRepo *pRepo);
static void tsdbEndCommit(STsdbRepo *pRepo, int eno);
static int tsdbHasDataToCommit(SCommitIter *iters, int nIters, TSKEY minKey, TSKEY maxKey);
static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHelper *pHelper, SDataCols *pDataCols);
static SCommitIter *tsdbCreateCommitIters(STsdbRepo *pRepo);
static void tsdbDestroyCommitIters(SCommitIter *iters, int maxTables);
void *tsdbCommitData(STsdbRepo *pRepo) {
SMemTable * pMem = pRepo->imem;
tsdbInfo("vgId:%d start to commit! keyFirst %" PRId64 " keyLast %" PRId64 " numOfRows %" PRId64 " meta rows: %d",
REPO_ID(pRepo), pMem->keyFirst, pMem->keyLast, pMem->numOfRows, listNEles(pMem->actList));
pRepo->code = TSDB_CODE_SUCCESS;
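// The commit result is recorded in pRepo->code; tsdbCloseRepo() copies it into terrno
// after the final commit finishes, so callers can detect a failed commit on close.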
// Commit to update meta file
if (tsdbCommitMeta(pRepo) < 0) {
tsdbError("vgId:%d error occurs while committing META data since %s", REPO_ID(pRepo), tstrerror(terrno));
goto _err;
}
// Create the iterator to read from cache
if (tsdbCommitTSData(pRepo) < 0) {
tsdbError("vgId:%d error occurs while committing TS data since %s", REPO_ID(pRepo), tstrerror(terrno));
goto _err;
}
tsdbFitRetention(pRepo);
tsdbInfo("vgId:%d commit over, succeed", REPO_ID(pRepo));
tsdbEndCommit(pRepo, TSDB_CODE_SUCCESS);
return NULL;
_err:
ASSERT(terrno != TSDB_CODE_SUCCESS);
pRepo->code = terrno;
tsdbInfo("vgId:%d commit over, failed", REPO_ID(pRepo));
tsdbEndCommit(pRepo, terrno);
return NULL;
}
static int tsdbCommitTSData(STsdbRepo *pRepo) {
SMemTable * pMem = pRepo->imem;
SDataCols * pDataCols = NULL;
STsdbMeta * pMeta = pRepo->tsdbMeta;
SCommitIter *iters = NULL;
SRWHelper whelper = {0};
STsdbCfg * pCfg = &(pRepo->config);
if (pMem->numOfRows <= 0) return 0;
iters = tsdbCreateCommitIters(pRepo);
if (iters == NULL) {
tsdbError("vgId:%d failed to create commit iterator since %s", REPO_ID(pRepo), tstrerror(terrno));
goto _err;
}
if (tsdbInitWriteHelper(&whelper, pRepo) < 0) {
tsdbError("vgId:%d failed to init write helper since %s", REPO_ID(pRepo), tstrerror(terrno));
goto _err;
}
if ((pDataCols = tdNewDataCols(pMeta->maxRowBytes, pMeta->maxCols, pCfg->maxRowsPerFileBlock)) == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
tsdbError("vgId:%d failed to init data cols with maxRowBytes %d maxCols %d maxRowsPerFileBlock %d since %s",
REPO_ID(pRepo), pMeta->maxCols, pMeta->maxRowBytes, pCfg->maxRowsPerFileBlock, tstrerror(terrno));
goto _err;
}
int sfid = (int)(TSDB_KEY_FILEID(pMem->keyFirst, pCfg->daysPerFile, pCfg->precision));
int efid = (int)(TSDB_KEY_FILEID(pMem->keyLast, pCfg->daysPerFile, pCfg->precision));
// Loop to commit to each file
for (int fid = sfid; fid <= efid; fid++) {
if (tsdbCommitToFile(pRepo, fid, iters, &whelper, pDataCols) < 0) {
tsdbError("vgId:%d failed to commit to file %d since %s", REPO_ID(pRepo), fid, tstrerror(terrno));
goto _err;
}
}
tdFreeDataCols(pDataCols);
tsdbDestroyCommitIters(iters, pMem->maxTables);
tsdbDestroyHelper(&whelper);
return 0;
_err:
tdFreeDataCols(pDataCols);
tsdbDestroyCommitIters(iters, pMem->maxTables);
tsdbDestroyHelper(&whelper);
return -1;
}
static int tsdbCommitMeta(STsdbRepo *pRepo) {
SMemTable *pMem = pRepo->imem;
STsdbMeta *pMeta = pRepo->tsdbMeta;
SActObj * pAct = NULL;
SActCont * pCont = NULL;
if (listNEles(pMem->actList) <= 0) return 0;
if (tdKVStoreStartCommit(pMeta->pStore) < 0) {
tsdbError("vgId:%d failed to commit data while start commit meta since %s", REPO_ID(pRepo), tstrerror(terrno));
goto _err;
}
SListNode *pNode = NULL;
while ((pNode = tdListPopHead(pMem->actList)) != NULL) {
pAct = (SActObj *)pNode->data;
if (pAct->act == TSDB_UPDATE_META) {
pCont = (SActCont *)POINTER_SHIFT(pAct, sizeof(SActObj));
if (tdUpdateKVStoreRecord(pMeta->pStore, pAct->uid, (void *)(pCont->cont), pCont->len) < 0) {
tsdbError("vgId:%d failed to update meta with uid %" PRIu64 " since %s", REPO_ID(pRepo), pAct->uid,
tstrerror(terrno));
tdKVStoreEndCommit(pMeta->pStore);
goto _err;
}
} else if (pAct->act == TSDB_DROP_META) {
if (tdDropKVStoreRecord(pMeta->pStore, pAct->uid) < 0) {
tsdbError("vgId:%d failed to drop meta with uid %" PRIu64 " since %s", REPO_ID(pRepo), pAct->uid,
tstrerror(terrno));
tdKVStoreEndCommit(pMeta->pStore);
goto _err;
}
} else {
ASSERT(false);
}
}
if (tdKVStoreEndCommit(pMeta->pStore) < 0) {
tsdbError("vgId:%d failed to commit data while end commit meta since %s", REPO_ID(pRepo), tstrerror(terrno));
goto _err;
}
return 0;
_err:
return -1;
}
static void tsdbEndCommit(STsdbRepo *pRepo, int eno) {
if (pRepo->appH.notifyStatus) pRepo->appH.notifyStatus(pRepo->appH.appH, TSDB_STATUS_COMMIT_OVER, eno);
sem_post(&(pRepo->readyToCommit));
}
static int tsdbHasDataToCommit(SCommitIter *iters, int nIters, TSKEY minKey, TSKEY maxKey) {
for (int i = 0; i < nIters; i++) {
TSKEY nextKey = tsdbNextIterKey((iters + i)->pIter);
if (nextKey != TSDB_DATA_TIMESTAMP_NULL && (nextKey >= minKey && nextKey <= maxKey)) return 1;
}
return 0;
}
static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHelper *pHelper, SDataCols *pDataCols) {
char * dataDir = NULL;
STsdbCfg * pCfg = &pRepo->config;
STsdbFileH *pFileH = pRepo->tsdbFileH;
SFileGroup *pGroup = NULL;
SMemTable * pMem = pRepo->imem;
bool newLast = false;
TSKEY minKey = 0, maxKey = 0;
tsdbGetFidKeyRange(pCfg->daysPerFile, pCfg->precision, fid, &minKey, &maxKey);
// Check if there are data to commit to this file
int hasDataToCommit = tsdbHasDataToCommit(iters, pMem->maxTables, minKey, maxKey);
if (!hasDataToCommit) {
tsdbDebug("vgId:%d no data to commit to file %d", REPO_ID(pRepo), fid);
return 0;
}
// Create and open files for commit
dataDir = tsdbGetDataDirName(pRepo->rootDir);
if (dataDir == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
return -1;
}
if ((pGroup = tsdbCreateFGroupIfNeed(pRepo, dataDir, fid)) == NULL) {
tsdbError("vgId:%d failed to create file group %d since %s", REPO_ID(pRepo), fid, tstrerror(terrno));
goto _err;
}
// Open files for write/read
if (tsdbSetAndOpenHelperFile(pHelper, pGroup) < 0) {
tsdbError("vgId:%d failed to set helper file since %s", REPO_ID(pRepo), tstrerror(terrno));
goto _err;
}
newLast = TSDB_NLAST_FILE_OPENED(pHelper);
if (tsdbLoadCompIdx(pHelper, NULL) < 0) {
tsdbError("vgId:%d failed to load SCompIdx part since %s", REPO_ID(pRepo), tstrerror(terrno));
goto _err;
}
// Loop to commit data in each table
for (int tid = 1; tid < pMem->maxTables; tid++) {
SCommitIter *pIter = iters + tid;
if (pIter->pTable == NULL) continue;
taosRLockLatch(&(pIter->pTable->latch));
if (tsdbSetHelperTable(pHelper, pIter->pTable, pRepo) < 0) goto _err;
if (pIter->pIter != NULL) {
if (tdInitDataCols(pDataCols, tsdbGetTableSchemaImpl(pIter->pTable, false, false, -1)) < 0) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
goto _err;
}
if (tsdbCommitTableData(pHelper, pIter, pDataCols, maxKey) < 0) {
taosRUnLockLatch(&(pIter->pTable->latch));
tsdbError("vgId:%d failed to write data of table %s tid %d uid %" PRIu64 " since %s", REPO_ID(pRepo),
TABLE_CHAR_NAME(pIter->pTable), TABLE_TID(pIter->pTable), TABLE_UID(pIter->pTable),
tstrerror(terrno));
goto _err;
}
}
taosRUnLockLatch(&(pIter->pTable->latch));
// Move the last block to the new .l file if necessary
if (tsdbMoveLastBlockIfNeccessary(pHelper) < 0) {
tsdbError("vgId:%d, failed to move last block, since %s", REPO_ID(pRepo), tstrerror(terrno));
goto _err;
}
// Write the SCompBlock part
if (tsdbWriteCompInfo(pHelper) < 0) {
tsdbError("vgId:%d, failed to write compInfo part since %s", REPO_ID(pRepo), tstrerror(terrno));
goto _err;
}
}
if (tsdbWriteCompIdx(pHelper) < 0) {
tsdbError("vgId:%d failed to write compIdx part to file %d since %s", REPO_ID(pRepo), fid, tstrerror(terrno));
goto _err;
}
tfree(dataDir);
tsdbCloseHelperFile(pHelper, 0, pGroup);
pthread_rwlock_wrlock(&(pFileH->fhlock));
(void)rename(helperNewHeadF(pHelper)->fname, helperHeadF(pHelper)->fname);
pGroup->files[TSDB_FILE_TYPE_HEAD].info = helperNewHeadF(pHelper)->info;
if (newLast) {
(void)rename(helperNewLastF(pHelper)->fname, helperLastF(pHelper)->fname);
pGroup->files[TSDB_FILE_TYPE_LAST].info = helperNewLastF(pHelper)->info;
} else {
pGroup->files[TSDB_FILE_TYPE_LAST].info = helperLastF(pHelper)->info;
}
pGroup->files[TSDB_FILE_TYPE_DATA].info = helperDataF(pHelper)->info;
pthread_rwlock_unlock(&(pFileH->fhlock));
return 0;
_err:
tfree(dataDir);
tsdbCloseHelperFile(pHelper, 1, NULL);
return -1;
}
static SCommitIter *tsdbCreateCommitIters(STsdbRepo *pRepo) {
SMemTable *pMem = pRepo->imem;
STsdbMeta *pMeta = pRepo->tsdbMeta;
SCommitIter *iters = (SCommitIter *)calloc(pMem->maxTables, sizeof(SCommitIter));
if (iters == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
return NULL;
}
if (tsdbRLockRepoMeta(pRepo) < 0) goto _err;
// reference all tables
for (int i = 0; i < pMem->maxTables; i++) {
if (pMeta->tables[i] != NULL) {
tsdbRefTable(pMeta->tables[i]);
iters[i].pTable = pMeta->tables[i];
}
}
if (tsdbUnlockRepoMeta(pRepo) < 0) goto _err;
for (int i = 0; i < pMem->maxTables; i++) {
if ((iters[i].pTable != NULL) && (pMem->tData[i] != NULL) && (TABLE_UID(iters[i].pTable) == pMem->tData[i]->uid)) {
if ((iters[i].pIter = tSkipListCreateIter(pMem->tData[i]->pData)) == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
goto _err;
}
tSkipListIterNext(iters[i].pIter);
}
}
return iters;
_err:
tsdbDestroyCommitIters(iters, pMem->maxTables);
return NULL;
}
static void tsdbDestroyCommitIters(SCommitIter *iters, int maxTables) {
if (iters == NULL) return;
for (int i = 1; i < maxTables; i++) {
if (iters[i].pTable != NULL) {
tsdbUnRefTable(iters[i].pTable);
tSkipListDestroyIter(iters[i].pIter);
}
}
free(iters);
}
......@@ -256,7 +256,8 @@ SFileGroup *tsdbCreateFGroupIfNeed(STsdbRepo *pRepo, char *dataDir, int fid) {
pFileH->pFGroup[pFileH->nFGroups++] = fGroup;
qsort((void *)(pFileH->pFGroup), pFileH->nFGroups, sizeof(SFileGroup), compFGroup);
pthread_rwlock_unlock(&pFileH->fhlock);
return tsdbSearchFGroup(pFileH, fid, TD_EQ);
pGroup = tsdbSearchFGroup(pFileH, fid, TD_EQ);
ASSERT(pGroup != NULL);
}
return pGroup;
......@@ -516,7 +517,7 @@ void tsdbGetFileInfoImpl(char *fname, uint32_t *magic, int64_t *size) {
SFile file;
SFile * pFile = &file;
strncpy(pFile->fname, fname, TSDB_FILENAME_LEN);
strncpy(pFile->fname, fname, TSDB_FILENAME_LEN - 1);
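// copy one byte less than the buffer size so the last byte is left for the terminating null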
pFile->fd = -1;
if (tsdbOpenFile(pFile, O_RDONLY) < 0) goto _err;
......
......@@ -32,18 +32,6 @@
#define TSDB_DEFAULT_COMPRESSION TWO_STAGE_COMP
#define IS_VALID_COMPRESSION(compression) (((compression) >= NO_COMPRESSION) && ((compression) <= TWO_STAGE_COMP))
typedef struct {
int32_t totalLen;
int32_t len;
SDataRow row;
} SSubmitBlkIter;
typedef struct {
int32_t totalLen;
int32_t len;
void * pMsg;
} SSubmitMsgIter;
static int32_t tsdbCheckAndSetDefaultCfg(STsdbCfg *pCfg);
static int32_t tsdbSetRepoEnv(char *rootDir, STsdbCfg *pCfg);
static int32_t tsdbUnsetRepoEnv(char *rootDir);
......@@ -52,20 +40,13 @@ static int tsdbLoadConfig(char *rootDir, STsdbCfg *pCfg);
static char * tsdbGetCfgFname(char *rootDir);
static STsdbRepo * tsdbNewRepo(char *rootDir, STsdbAppH *pAppH, STsdbCfg *pCfg);
static void tsdbFreeRepo(STsdbRepo *pRepo);
static int tsdbInitSubmitMsgIter(SSubmitMsg *pMsg, SSubmitMsgIter *pIter);
static int32_t tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, TSKEY now, int32_t *affectedrows);
static int tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter, SSubmitBlk **pPBlock);
static SDataRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter);
static int tsdbRestoreInfo(STsdbRepo *pRepo);
static int tsdbInitSubmitBlkIter(SSubmitBlk *pBlock, SSubmitBlkIter *pIter);
static void tsdbAlterCompression(STsdbRepo *pRepo, int8_t compression);
static int tsdbAlterKeep(STsdbRepo *pRepo, int32_t keep);
static int tsdbAlterCacheTotalBlocks(STsdbRepo *pRepo, int totalBlocks);
static int keyFGroupCompFunc(const void *key, const void *fgroup);
static int tsdbEncodeCfg(void **buf, STsdbCfg *pCfg);
static void * tsdbDecodeCfg(void *buf, STsdbCfg *pCfg);
static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pTable);
static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg);
static void tsdbStartStream(STsdbRepo *pRepo);
static void tsdbStopStream(STsdbRepo *pRepo);
......@@ -153,17 +134,20 @@ _err:
}
// Note: all working threads and query threads must be stopped when calling this function
void tsdbCloseRepo(TSDB_REPO_T *repo, int toCommit) {
if (repo == NULL) return;
int tsdbCloseRepo(TSDB_REPO_T *repo, int toCommit) {
if (repo == NULL) return 0;
STsdbRepo *pRepo = (STsdbRepo *)repo;
int vgId = REPO_ID(pRepo);
terrno = TSDB_CODE_SUCCESS;
tsdbStopStream(pRepo);
if (toCommit) {
tsdbAsyncCommit(pRepo);
sem_wait(&(pRepo->readyToCommit));
terrno = pRepo->code;
}
tsdbUnRefMemTable(pRepo, pRepo->mem);
tsdbUnRefMemTable(pRepo, pRepo->imem);
......@@ -175,40 +159,12 @@ void tsdbCloseRepo(TSDB_REPO_T *repo, int toCommit) {
tsdbCloseMeta(pRepo);
tsdbFreeRepo(pRepo);
tsdbDebug("vgId:%d repository is closed", vgId);
}
int32_t tsdbInsertData(TSDB_REPO_T *repo, SSubmitMsg *pMsg, SShellSubmitRspMsg *pRsp) {
STsdbRepo * pRepo = (STsdbRepo *)repo;
SSubmitMsgIter msgIter = {0};
if (tsdbScanAndConvertSubmitMsg(pRepo, pMsg) < 0) {
if (terrno != TSDB_CODE_TDB_TABLE_RECONFIGURE) {
tsdbError("vgId:%d failed to insert data since %s", REPO_ID(pRepo), tstrerror(terrno));
}
return -1;
}
if (tsdbInitSubmitMsgIter(pMsg, &msgIter) < 0) {
tsdbError("vgId:%d failed to insert data since %s", REPO_ID(pRepo), tstrerror(terrno));
if (terrno != TSDB_CODE_SUCCESS) {
return -1;
} else {
return 0;
}
SSubmitBlk *pBlock = NULL;
int32_t affectedrows = 0;
TSKEY now = taosGetTimestamp(pRepo->config.precision);
while (true) {
tsdbGetSubmitMsgNext(&msgIter, &pBlock);
if (pBlock == NULL) break;
if (tsdbInsertDataToTable(pRepo, pBlock, now, &affectedrows) < 0) {
return -1;
}
}
if (pRsp != NULL) pRsp->affectedRows = htonl(affectedrows);
if (tsdbCheckCommit(pRepo) < 0) return -1;
return 0;
}
uint32_t tsdbGetFileInfo(TSDB_REPO_T *repo, char *name, uint32_t *index, uint32_t eindex, int64_t *size) {
......@@ -672,6 +628,7 @@ static STsdbRepo *tsdbNewRepo(char *rootDir, STsdbAppH *pAppH, STsdbCfg *pCfg) {
}
pRepo->state = TSDB_STATE_OK;
pRepo->code = TSDB_CODE_SUCCESS;
int code = pthread_mutex_init(&pRepo->mutex, NULL);
if (code != 0) {
......@@ -735,93 +692,6 @@ static void tsdbFreeRepo(STsdbRepo *pRepo) {
}
}
static int tsdbInitSubmitMsgIter(SSubmitMsg *pMsg, SSubmitMsgIter *pIter) {
if (pMsg == NULL) {
terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP;
return -1;
}
pIter->totalLen = pMsg->length;
pIter->len = 0;
pIter->pMsg = pMsg;
if (pMsg->length <= TSDB_SUBMIT_MSG_HEAD_SIZE) {
terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP;
return -1;
}
return 0;
}
static int32_t tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, TSKEY now, int32_t *affectedrows) {
STsdbMeta *pMeta = pRepo->tsdbMeta;
int64_t points = 0;
ASSERT(pBlock->tid < pMeta->maxTables);
STable *pTable = pMeta->tables[pBlock->tid];
ASSERT(pTable != NULL && TABLE_UID(pTable) == pBlock->uid);
SSubmitBlkIter blkIter = {0};
SDataRow row = NULL;
TSKEY minKey = now - tsMsPerDay[pRepo->config.precision] * pRepo->config.keep;
TSKEY maxKey = now + tsMsPerDay[pRepo->config.precision] * pRepo->config.daysPerFile;
tsdbInitSubmitBlkIter(pBlock, &blkIter);
while ((row = tsdbGetSubmitBlkNext(&blkIter)) != NULL) {
if (dataRowKey(row) < minKey || dataRowKey(row) > maxKey) {
tsdbError("vgId:%d table %s tid %d uid %" PRIu64 " timestamp is out of range! now %" PRId64 " minKey %" PRId64
" maxKey %" PRId64,
REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), now, minKey, maxKey);
terrno = TSDB_CODE_TDB_TIMESTAMP_OUT_OF_RANGE;
return -1;
}
if (tsdbUpdateRowInMem(pRepo, row, pTable) < 0) return -1;
(*affectedrows)++;
points++;
}
STSchema *pSchema = tsdbGetTableSchemaByVersion(pTable, pBlock->sversion);
pRepo->stat.pointsWritten += points * schemaNCols(pSchema);
pRepo->stat.totalStorage += points * schemaVLen(pSchema);
return 0;
}
static int tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter, SSubmitBlk **pPBlock) {
if (pIter->len == 0) {
pIter->len += TSDB_SUBMIT_MSG_HEAD_SIZE;
} else {
SSubmitBlk *pSubmitBlk = (SSubmitBlk *)POINTER_SHIFT(pIter->pMsg, pIter->len);
pIter->len += (sizeof(SSubmitBlk) + pSubmitBlk->dataLen + pSubmitBlk->schemaLen);
}
if (pIter->len > pIter->totalLen) {
terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP;
*pPBlock = NULL;
return -1;
}
*pPBlock = (pIter->len == pIter->totalLen) ? NULL : (SSubmitBlk *)POINTER_SHIFT(pIter->pMsg, pIter->len);
return 0;
}
static SDataRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter) {
SDataRow row = pIter->row;
if (row == NULL) return NULL;
pIter->len += dataRowLen(row);
if (pIter->len >= pIter->totalLen) {
pIter->row = NULL;
} else {
pIter->row = (char *)row + dataRowLen(row);
}
return row;
}
static int tsdbRestoreInfo(STsdbRepo *pRepo) {
STsdbMeta * pMeta = pRepo->tsdbMeta;
STsdbFileH *pFileH = pRepo->tsdbFileH;
......@@ -855,14 +725,6 @@ _err:
return -1;
}
static int tsdbInitSubmitBlkIter(SSubmitBlk *pBlock, SSubmitBlkIter *pIter) {
if (pBlock->dataLen <= 0) return -1;
pIter->totalLen = pBlock->dataLen;
pIter->len = 0;
pIter->row = (SDataRow)(pBlock->data+pBlock->schemaLen);
return 0;
}
static void tsdbAlterCompression(STsdbRepo *pRepo, int8_t compression) {
int8_t ocompression = pRepo->config.compression;
pRepo->config.compression = compression;
......@@ -959,134 +821,6 @@ static void *tsdbDecodeCfg(void *buf, STsdbCfg *pCfg) {
return buf;
}
static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pTable) {
ASSERT(pTable != NULL);
STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1);
int sversion = schemaVersion(pSchema);
if (pBlock->sversion == sversion) {
return 0;
} else {
if (TABLE_TYPE(pTable) == TSDB_STREAM_TABLE) { // stream table is not allowed to change schema
terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION;
return -1;
}
}
if (pBlock->sversion > sversion) { // may need to update table schema
if (pBlock->schemaLen > 0) {
tsdbDebug(
"vgId:%d table %s tid %d uid %" PRIu64 " schema version %d is out of data, client version %d, update...",
REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), sversion, pBlock->sversion);
ASSERT(pBlock->schemaLen % sizeof(STColumn) == 0);
int numOfCols = pBlock->schemaLen / sizeof(STColumn);
STColumn *pTCol = (STColumn *)pBlock->data;
STSchemaBuilder schemaBuilder = {0};
if (tdInitTSchemaBuilder(&schemaBuilder, pBlock->sversion) < 0) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
tsdbError("vgId:%d failed to update schema of table %s since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
tstrerror(terrno));
return -1;
}
for (int i = 0; i < numOfCols; i++) {
if (tdAddColToSchema(&schemaBuilder, pTCol[i].type, htons(pTCol[i].colId), htons(pTCol[i].bytes)) < 0) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
tsdbError("vgId:%d failed to update schema of table %s since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
tstrerror(terrno));
tdDestroyTSchemaBuilder(&schemaBuilder);
return -1;
}
}
STSchema *pNSchema = tdGetSchemaFromBuilder(&schemaBuilder);
if (pNSchema == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
tdDestroyTSchemaBuilder(&schemaBuilder);
return -1;
}
tdDestroyTSchemaBuilder(&schemaBuilder);
tsdbUpdateTableSchema(pRepo, pTable, pNSchema, true);
} else {
tsdbDebug(
"vgId:%d table %s tid %d uid %" PRIu64 " schema version %d is out of data, client version %d, reconfigure...",
REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), sversion, pBlock->sversion);
terrno = TSDB_CODE_TDB_TABLE_RECONFIGURE;
return -1;
}
} else {
ASSERT(pBlock->sversion >= 0);
if (tsdbGetTableSchemaImpl(pTable, false, false, pBlock->sversion) == NULL) {
tsdbError("vgId:%d invalid submit schema version %d to table %s tid %d from client", REPO_ID(pRepo),
pBlock->sversion, TABLE_CHAR_NAME(pTable), TABLE_TID(pTable));
}
terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION;
return -1;
}
return 0;
}
static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg) {
ASSERT(pMsg != NULL);
STsdbMeta * pMeta = pRepo->tsdbMeta;
SSubmitMsgIter msgIter = {0};
SSubmitBlk * pBlock = NULL;
terrno = TSDB_CODE_SUCCESS;
pMsg->length = htonl(pMsg->length);
pMsg->numOfBlocks = htonl(pMsg->numOfBlocks);
if (tsdbInitSubmitMsgIter(pMsg, &msgIter) < 0) return -1;
while (true) {
if (tsdbGetSubmitMsgNext(&msgIter, &pBlock) < 0) return -1;
if (pBlock == NULL) break;
pBlock->uid = htobe64(pBlock->uid);
pBlock->tid = htonl(pBlock->tid);
pBlock->sversion = htonl(pBlock->sversion);
pBlock->dataLen = htonl(pBlock->dataLen);
pBlock->schemaLen = htonl(pBlock->schemaLen);
pBlock->numOfRows = htons(pBlock->numOfRows);
if (pBlock->tid <= 0 || pBlock->tid >= pMeta->maxTables) {
tsdbError("vgId:%d failed to get table to insert data, uid %" PRIu64 " tid %d", REPO_ID(pRepo), pBlock->uid,
pBlock->tid);
terrno = TSDB_CODE_TDB_INVALID_TABLE_ID;
return -1;
}
STable *pTable = pMeta->tables[pBlock->tid];
if (pTable == NULL || TABLE_UID(pTable) != pBlock->uid) {
tsdbError("vgId:%d failed to get table to insert data, uid %" PRIu64 " tid %d", REPO_ID(pRepo), pBlock->uid,
pBlock->tid);
terrno = TSDB_CODE_TDB_INVALID_TABLE_ID;
return -1;
}
if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) {
tsdbError("vgId:%d invalid action trying to insert a super table %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable));
terrno = TSDB_CODE_TDB_INVALID_ACTION;
return -1;
}
// Check schema version and update schema if needed
if (tsdbCheckTableSchema(pRepo, pBlock, pTable) < 0) {
if (terrno == TSDB_CODE_TDB_TABLE_RECONFIGURE) {
continue;
} else {
return -1;
}
}
}
if (terrno != TSDB_CODE_SUCCESS) return -1;
return 0;
}
static int tsdbAlterCacheTotalBlocks(STsdbRepo *pRepo, int totalBlocks) {
// TODO
// STsdbCache *pCache = pRepo->tsdbCache;
......
This diff has been collapsed.
......@@ -1595,7 +1595,7 @@ static int tsdbProcessMergeCommit(SRWHelper *pHelper, SCommitIter *pCommitIter,
tblkIdx++;
} else if (oBlock.numOfRows + pMergeInfo->rowsInserted - pMergeInfo->rowsDeleteSucceed == 0) {
// Delete the block and do some stuff
ASSERT(pMergeInfo->keyFirst == INT64_MAX && pMergeInfo->keyFirst == INT64_MIN);
// ASSERT(pMergeInfo->keyFirst == INT64_MAX && pMergeInfo->keyFirst == INT64_MIN);
if (tsdbDeleteSuperBlock(pHelper, tblkIdx) < 0) return -1;
*pCommitIter->pIter = slIter;
if (oBlock.last && pHelper->hasOldLastBlock) pHelper->hasOldLastBlock = false;
......
......@@ -131,6 +131,7 @@ SSkipList *tSkipListCreate(uint8_t maxLevel, uint8_t keyType, uint16_t keyLen, _
__sl_key_fn_t fn);
void tSkipListDestroy(SSkipList *pSkipList);
SSkipListNode * tSkipListPut(SSkipList *pSkipList, void *pData);
void tSkipListPutBatch(SSkipList *pSkipList, void **ppData, int ndata);
SArray * tSkipListGet(SSkipList *pSkipList, SSkipListKey pKey);
void tSkipListPrint(SSkipList *pSkipList, int16_t nlevel);
SSkipListIterator *tSkipListCreateIter(SSkipList *pSkipList);
......
......@@ -236,6 +236,7 @@ int tdUpdateKVStoreRecord(SKVStore *pStore, uint64_t uid, void *cont, int contLe
rInfo.offset = lseek(pStore->fd, 0, SEEK_CUR);
if (rInfo.offset < 0) {
uError("failed to lseek file %s since %s", pStore->fname, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
return -1;
}
......@@ -254,6 +255,7 @@ int tdUpdateKVStoreRecord(SKVStore *pStore, uint64_t uid, void *cont, int contLe
if (taosWrite(pStore->fd, cont, contLen) < contLen) {
uError("failed to write %d bytes to file %s since %s", contLen, pStore->fname, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
return -1;
}
......
......@@ -287,17 +287,17 @@ static int32_t taosOpenLogFile(char *fn, int32_t maxLines, int32_t maxFileNum) {
tsLogObj.fileNum = maxFileNum;
taosGetLogFileName(fn);
if (strlen(fn) < LOG_FILE_NAME_LEN + 50 - 2) {
strcpy(name, fn);
strcat(name, ".0");
}
bool log0Exist = stat(name, &logstat0) >= 0;
if (strlen(fn) < LOG_FILE_NAME_LEN + 50 - 2) {
strcpy(name, fn);
strcat(name, ".1");
}
bool log0Exist = stat(name, &logstat0) >= 0;
bool log1Exist = stat(name, &logstat1) >= 0;
// if none of the log files exist, open 0; if both exist, open the old one
......
......@@ -426,11 +426,11 @@ static int taosDecRefCount(int rsetId, int64_t rid, int remove) {
(*pSet->fp)(pNode->p);
uTrace("rsetId:%d p:%p rid:%" PRId64 "is removed, count:%d, free mem: %p", rsetId, pNode->p, rid, pSet->count, pNode);
uTrace("rsetId:%d p:%p rid:%" PRId64 " is removed, count:%d, free mem: %p", rsetId, pNode->p, rid, pSet->count, pNode);
free(pNode);
released = 1;
} else {
uTrace("rsetId:%d p:%p rid:%" PRId64 "is released, count:%d", rsetId, pNode->p, rid, pNode->count);
uTrace("rsetId:%d p:%p rid:%" PRId64 " is released, count:%d", rsetId, pNode->p, rid, pNode->count);
}
} else {
uTrace("rsetId:%d rid:%" PRId64 " is not there, failed to release/remove", rsetId, rid);
......
......@@ -24,10 +24,12 @@ static SSkipListNode * getPriorNode(SSkipList *pSkipList, const char *val, in
static void tSkipListRemoveNodeImpl(SSkipList *pSkipList, SSkipListNode *pNode);
static void tSkipListCorrectLevel(SSkipList *pSkipList);
static SSkipListIterator *doCreateSkipListIterator(SSkipList *pSkipList, int32_t order);
static void tSkipListDoInsert(SSkipList *pSkipList, SSkipListNode **backward, SSkipListNode *pNode);
static bool tSkipListGetPosToPut(SSkipList *pSkipList, SSkipListNode **backward, void *pData);
static SSkipListNode * tSkipListNewNode(uint8_t level);
static void tSkipListDoInsert(SSkipList *pSkipList, SSkipListNode **direction, SSkipListNode *pNode, bool isForward);
static bool tSkipListGetPosToPut(SSkipList *pSkipList, SSkipListNode **backward, void *pData);
static SSkipListNode *tSkipListNewNode(uint8_t level);
#define tSkipListFreeNode(n) tfree((n))
static SSkipListNode *tSkipListPutImpl(SSkipList *pSkipList, void *pData, SSkipListNode **direction, bool isForward,
bool hasDup);
static FORCE_INLINE int tSkipListWLock(SSkipList *pSkipList);
static FORCE_INLINE int tSkipListRLock(SSkipList *pSkipList);
......@@ -109,30 +111,85 @@ SSkipListNode *tSkipListPut(SSkipList *pSkipList, void *pData) {
if (pSkipList == NULL || pData == NULL) return NULL;
SSkipListNode *backward[MAX_SKIP_LIST_LEVEL] = {0};
uint8_t dupMode = SL_DUP_MODE(pSkipList);
SSkipListNode *pNode = NULL;
tSkipListWLock(pSkipList);
bool hasDup = tSkipListGetPosToPut(pSkipList, backward, pData);
pNode = tSkipListPutImpl(pSkipList, pData, backward, false, hasDup);
if (hasDup && (dupMode == SL_DISCARD_DUP_KEY || dupMode == SL_UPDATE_DUP_KEY)) {
if (dupMode == SL_UPDATE_DUP_KEY) {
pNode = SL_NODE_GET_BACKWARD_POINTER(backward[0], 0);
atomic_store_ptr(&(pNode->pData), pData);
}
} else {
pNode = tSkipListNewNode(getSkipListRandLevel(pSkipList));
if (pNode != NULL) {
pNode->pData = pData;
tSkipListUnlock(pSkipList);
return pNode;
}
// Put a batch of data into skiplist. The batch of data must be in ascending order
void tSkipListPutBatch(SSkipList *pSkipList, void **ppData, int ndata) {
SSkipListNode *backward[MAX_SKIP_LIST_LEVEL] = {0};
SSkipListNode *forward[MAX_SKIP_LIST_LEVEL] = {0};
bool hasDup = false;
char * pKey = NULL;
char * pDataKey = NULL;
int compare = 0;
tSkipListWLock(pSkipList);
tSkipListDoInsert(pSkipList, backward, pNode);
// backward to put the first data
hasDup = tSkipListGetPosToPut(pSkipList, backward, ppData[0]);
tSkipListPutImpl(pSkipList, ppData[0], backward, false, hasDup);
for (int level = 0; level < pSkipList->maxLevel; level++) {
forward[level] = SL_NODE_GET_BACKWARD_POINTER(backward[level], level);
}
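// forward[] now caches the insert position at every level (the node after which the
// next key goes); since the batch is sorted ascending, the search for each later key
// resumes from these cursors instead of restarting from the head of the list.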
// forward to put the rest of data
for (int idata = 1; idata < ndata; idata++) {
pDataKey = pSkipList->keyFn(ppData[idata]);
// Compare max key
pKey = SL_GET_MAX_KEY(pSkipList);
compare = pSkipList->comparFn(pDataKey, pKey);
if (compare > 0) {
for (int i = 0; i < pSkipList->maxLevel; i++) {
forward[i] = SL_NODE_GET_BACKWARD_POINTER(pSkipList->pTail, i);
}
hasDup = false;
} else {
SSkipListNode *px = pSkipList->pHead;
for (int i = pSkipList->maxLevel - 1; i >= 0; --i) {
if (i < pSkipList->level) {
// set new px
if (forward[i] != pSkipList->pHead) {
if (px == pSkipList->pHead ||
pSkipList->comparFn(SL_GET_NODE_KEY(pSkipList, forward[i]), SL_GET_NODE_KEY(pSkipList, px)) > 0) {
px = forward[i];
}
}
SSkipListNode *p = SL_NODE_GET_FORWARD_POINTER(px, i);
while (p != pSkipList->pTail) {
pKey = SL_GET_NODE_KEY(pSkipList, p);
compare = pSkipList->comparFn(pKey, pDataKey);
if (compare >= 0) {
if (compare == 0) hasDup = true;
break;
} else {
px = p;
p = SL_NODE_GET_FORWARD_POINTER(px, i);
}
}
}
forward[i] = px;
}
}
tSkipListPutImpl(pSkipList, ppData[idata], forward, true, hasDup);
}
tSkipListUnlock(pSkipList);
return pNode;
}
uint32_t tSkipListRemove(SSkipList *pSkipList, SSkipListKey key) {
......@@ -310,22 +367,25 @@ void tSkipListPrint(SSkipList *pSkipList, int16_t nlevel) {
}
}
static void tSkipListDoInsert(SSkipList *pSkipList, SSkipListNode **backward, SSkipListNode *pNode) {
static void tSkipListDoInsert(SSkipList *pSkipList, SSkipListNode **direction, SSkipListNode *pNode, bool isForward) {
for (int32_t i = 0; i < pNode->level; ++i) {
if (i >= pSkipList->level) {
SL_NODE_GET_FORWARD_POINTER(pNode, i) = pSkipList->pTail;
SL_NODE_GET_BACKWARD_POINTER(pNode, i) = pSkipList->pHead;
SL_NODE_GET_FORWARD_POINTER(pSkipList->pHead, i) = pNode;
SL_NODE_GET_BACKWARD_POINTER(pSkipList->pTail, i) = pNode;
SSkipListNode *x = direction[i];
if (isForward) {
SL_NODE_GET_BACKWARD_POINTER(pNode, i) = x;
SSkipListNode *next = SL_NODE_GET_FORWARD_POINTER(x, i);
SL_NODE_GET_BACKWARD_POINTER(next, i) = pNode;
SL_NODE_GET_FORWARD_POINTER(pNode, i) = next;
SL_NODE_GET_FORWARD_POINTER(x, i) = pNode;
} else {
SSkipListNode *x = backward[i];
SL_NODE_GET_FORWARD_POINTER(pNode, i) = x;
SSkipListNode *prev = SL_NODE_GET_BACKWARD_POINTER(x, i);
SL_NODE_GET_FORWARD_POINTER(prev, i) = pNode;
SL_NODE_GET_BACKWARD_POINTER(x, i) = pNode;
SL_NODE_GET_BACKWARD_POINTER(pNode, i) = prev;
SL_NODE_GET_BACKWARD_POINTER(x, i) = pNode;
}
}
......@@ -377,7 +437,7 @@ static bool tSkipListGetPosToPut(SSkipList *pSkipList, SSkipListNode **backward,
char * pDataKey = pSkipList->keyFn(pData);
if (pSkipList->size == 0) {
for (int i = 0; i < pSkipList->level; i++) {
for (int i = 0; i < pSkipList->maxLevel; i++) {
backward[i] = pSkipList->pTail;
}
} else {
......@@ -387,7 +447,7 @@ static bool tSkipListGetPosToPut(SSkipList *pSkipList, SSkipListNode **backward,
pKey = SL_GET_MAX_KEY(pSkipList);
compare = pSkipList->comparFn(pDataKey, pKey);
if (compare >= 0) {
for (int i = 0; i < pSkipList->level; i++) {
for (int i = 0; i < pSkipList->maxLevel; i++) {
backward[i] = pSkipList->pTail;
}
......@@ -398,7 +458,7 @@ static bool tSkipListGetPosToPut(SSkipList *pSkipList, SSkipListNode **backward,
pKey = SL_GET_MIN_KEY(pSkipList);
compare = pSkipList->comparFn(pDataKey, pKey);
if (compare < 0) {
for (int i = 0; i < pSkipList->level; i++) {
for (int i = 0; i < pSkipList->maxLevel; i++) {
backward[i] = SL_NODE_GET_FORWARD_POINTER(pSkipList->pHead, i);
}
......@@ -406,18 +466,20 @@ static bool tSkipListGetPosToPut(SSkipList *pSkipList, SSkipListNode **backward,
}
SSkipListNode *px = pSkipList->pTail;
for (int i = pSkipList->level - 1; i >= 0; --i) {
SSkipListNode *p = SL_NODE_GET_BACKWARD_POINTER(px, i);
while (p != pSkipList->pHead) {
pKey = SL_GET_NODE_KEY(pSkipList, p);
compare = pSkipList->comparFn(pKey, pDataKey);
if (compare <= 0) {
if (compare == 0 && !hasDupKey) hasDupKey = true;
break;
} else {
px = p;
p = SL_NODE_GET_BACKWARD_POINTER(px, i);
for (int i = pSkipList->maxLevel - 1; i >= 0; --i) {
if (i < pSkipList->level) {
SSkipListNode *p = SL_NODE_GET_BACKWARD_POINTER(px, i);
while (p != pSkipList->pHead) {
pKey = SL_GET_NODE_KEY(pSkipList, p);
compare = pSkipList->comparFn(pKey, pDataKey);
if (compare <= 0) {
if (compare == 0 && !hasDupKey) hasDupKey = true;
break;
} else {
px = p;
p = SL_NODE_GET_BACKWARD_POINTER(px, i);
}
}
}
......@@ -579,6 +641,32 @@ static SSkipListNode *tSkipListNewNode(uint8_t level) {
return pNode;
}
static SSkipListNode *tSkipListPutImpl(SSkipList *pSkipList, void *pData, SSkipListNode **direction, bool isForward,
bool hasDup) {
uint8_t dupMode = SL_DUP_MODE(pSkipList);
SSkipListNode *pNode = NULL;
if (hasDup && (dupMode == SL_DISCARD_DUP_KEY || dupMode == SL_UPDATE_DUP_KEY)) {
if (dupMode == SL_UPDATE_DUP_KEY) {
if (isForward) {
pNode = SL_NODE_GET_FORWARD_POINTER(direction[0], 0);
} else {
pNode = SL_NODE_GET_BACKWARD_POINTER(direction[0], 0);
}
atomic_store_ptr(&(pNode->pData), pData);
}
} else {
pNode = tSkipListNewNode(getSkipListRandLevel(pSkipList));
if (pNode != NULL) {
pNode->pData = pData;
tSkipListDoInsert(pSkipList, direction, pNode, isForward);
}
}
return pNode;
}
// static int32_t tSkipListEndParQuery(SSkipList *pSkipList, SSkipListNode *pStartNode, SSkipListKey *pEndKey,
// int32_t cond, SSkipListNode ***pRes) {
// pthread_rwlock_rdlock(&pSkipList->lock);
......
......@@ -107,7 +107,7 @@ int32_t taosWriteMsg(SOCKET fd, void *buf, int32_t nbytes) {
while (nleft > 0) {
nwritten = (int32_t)taosWriteSocket(fd, (char *)ptr, (size_t)nleft);
if (nwritten <= 0) {
if (errno == EINTR)
if (errno == EINTR || errno == EAGAIN || errno == EWOULDBLOCK)
continue;
else
return -1;
......@@ -133,7 +133,7 @@ int32_t taosReadMsg(SOCKET fd, void *buf, int32_t nbytes) {
if (nread == 0) {
break;
} else if (nread < 0) {
if (errno == EINTR) {
if (errno == EINTR || errno == EAGAIN || errno == EWOULDBLOCK) {
continue;
} else {
return -1;
......
......@@ -58,7 +58,7 @@ typedef struct {
char *rootDir;
tsem_t sem;
int8_t dropped;
char db[TSDB_DB_NAME_LEN];
char db[TSDB_ACCT_LEN + TSDB_DB_NAME_LEN];
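// The db name received from the mnode may carry the account prefix, so the buffer
// is sized for both parts (TSDB_ACCT_LEN + TSDB_DB_NAME_LEN).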
} SVnodeObj;
void vnodeInitWriteFp(void);
......
......@@ -25,7 +25,7 @@
#include "vnodeCfg.h"
static void vnodeLoadCfg(SVnodeObj *pVnode, SCreateVnodeMsg* vnodeMsg) {
strcpy(pVnode->db, vnodeMsg->db);
tstrncpy(pVnode->db, vnodeMsg->db, sizeof(pVnode->db));
pVnode->cfgVersion = vnodeMsg->cfg.cfgVersion;
pVnode->tsdbCfg.cacheBlockSize = vnodeMsg->cfg.cacheBlockSize;
pVnode->tsdbCfg.totalBlocks = vnodeMsg->cfg.totalBlocks;
......@@ -97,7 +97,7 @@ int32_t vnodeReadCfg(SVnodeObj *pVnode) {
vError("vgId:%d, failed to read %s, db not found", pVnode->vgId, file);
goto PARSE_VCFG_ERROR;
}
strcpy(vnodeMsg.db, db->valuestring);
tstrncpy(vnodeMsg.db, db->valuestring, sizeof(vnodeMsg.db));
cJSON *cfgVersion = cJSON_GetObjectItem(root, "cfgVersion");
if (!cfgVersion || cfgVersion->type != cJSON_Number) {
......
......@@ -30,7 +30,7 @@
static SHashObj*tsVnodesHash;
static void vnodeCleanUp(SVnodeObj *pVnode);
static int vnodeProcessTsdbStatus(void *arg, int status);
static int vnodeProcessTsdbStatus(void *arg, int status, int eno);
static uint32_t vnodeGetFileInfo(void *ahandle, char *name, uint32_t *index, uint32_t eindex, int64_t *size, uint64_t *fversion);
static int vnodeGetWalInfo(void *ahandle, char *fileName, int64_t *fileId);
static void vnodeNotifyRole(void *ahandle, int8_t role);
......@@ -590,9 +590,13 @@ static void vnodeCleanUp(SVnodeObj *pVnode) {
}
// TODO: this is a simple implementation
static int vnodeProcessTsdbStatus(void *arg, int status) {
static int vnodeProcessTsdbStatus(void *arg, int status, int eno) {
SVnodeObj *pVnode = arg;
if (eno != TSDB_CODE_SUCCESS) {
// TODO: deal with the error here
}
if (status == TSDB_STATUS_COMMIT_START) {
pVnode->fversion = pVnode->version;
vDebug("vgId:%d, start commit, fver:%" PRIu64 " vver:%" PRIu64, pVnode->vgId, pVnode->fversion, pVnode->version);
......
......@@ -37,7 +37,7 @@ extern int32_t wDebugFlag;
#define WAL_MAX_SIZE (TSDB_MAX_WAL_SIZE + sizeof(SWalHead) + 16)
#define WAL_SIGNATURE ((uint32_t)(0xFAFBFDFE))
#define WAL_PATH_LEN (TSDB_FILENAME_LEN + 12)
#define WAL_FILE_LEN (TSDB_FILENAME_LEN + 32)
#define WAL_FILE_LEN (WAL_PATH_LEN + 32)
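// The file name is built from a full path plus a suffix, so its length is derived
// from WAL_PATH_LEN to avoid truncation.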
#define WAL_FILE_NUM 3
typedef struct {
......
......@@ -155,6 +155,9 @@ python3 ./test.py -f query/queryCountCSVData.py
python3 ./test.py -f query/natualInterval.py
python3 ./test.py -f query/bug1471.py
#python3 ./test.py -f query/dataLossTest.py
python3 ./test.py -f query/bug1874.py
python3 ./test.py -f query/bug1875.py
python3 ./test.py -f query/bug1876.py
#stream
python3 ./test.py -f stream/metric_1.py
......