Commit 162465cc authored by H Hongze Cheng

Merge branch 'develop' into feature/2.0tsdb

......@@ -384,9 +384,11 @@ SDataCols *tdDupDataCols(SDataCols *pDataCols, bool keepData) {
}
void tdResetDataCols(SDataCols *pCols) {
pCols->numOfRows = 0;
for (int i = 0; i < pCols->maxCols; i++) {
dataColReset(pCols->cols + i);
if (pCols != NULL) {
pCols->numOfRows = 0;
for (int i = 0; i < pCols->maxCols; i++) {
dataColReset(pCols->cols + i);
}
}
}
......
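The hunk above adds a NULL guard to tdResetDataCols(), so resetting a column buffer through a NULL pointer becomes a no-op instead of a crash. A minimal toy sketch of that behaviour, assuming nothing beyond standard C; SDataCols is reduced to a stub here and the real definition lives in tdataformat.h:

#include <stddef.h>
#include <stdio.h>

/* Stub stand-in for SDataCols; only the fields the sketch touches. */
typedef struct { int numOfRows; int maxCols; } SDataCols;

static void tdResetDataColsSketch(SDataCols *pCols) {
  if (pCols != NULL) {          /* the guard introduced by this commit */
    pCols->numOfRows = 0;
    /* ... dataColReset() would be called for each of pCols->maxCols columns ... */
  }
}

int main(void) {
  tdResetDataColsSketch(NULL);  /* previously a NULL dereference, now safely ignored */
  printf("reset on NULL handled safely\n");
  return 0;
}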
package com.taosdata.jdbc;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.sql.*;
import java.util.Properties;
import java.util.Random;
import static org.junit.Assert.assertEquals;
import java.util.concurrent.*;
import static org.junit.Assert.assertTrue;
public class BatchInsertTest extends BaseTest {
static Connection connection = null;
static Statement statement = null;
static String dbName = "test";
static String stbName = "meters";
static String host = "localhost";
static int numOfTables = 30;
final static int numOfRecordsPerTable = 1000;
static long ts = 1496732686000L;
final static String tablePrefix = "t";
@Before
public void createDatabase() throws SQLException {
try {
Class.forName("com.taosdata.jdbc.TSDBDriver");
} catch (ClassNotFoundException e) {
return;
}
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host);
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/" + "?user=root&password=taosdata"
, properties);
statement = connection.createStatement();
statement.executeUpdate("drop database if exists " + dbName);
statement.executeUpdate("create database if not exists " + dbName);
statement.executeUpdate("use " + dbName);
String createTableSql = "create table " + stbName + "(ts timestamp, f1 int, f2 int, f3 int) tags(areaid int, loc binary(20))";
statement.executeUpdate(createTableSql);
for(int i = 0; i < numOfTables; i++) {
String loc = i % 2 == 0 ? "beijing" : "shanghai";
String createSubTablesSql = "create table " + tablePrefix + i + " using " + stbName + " tags(" + i + ", '" + loc + "')";
statement.executeUpdate(createSubTablesSql);
}
}
@Test
public void testBatchInsert() throws SQLException{
ExecutorService executorService = Executors.newFixedThreadPool(numOfTables);
for (int i = 0; i < numOfTables; i++) {
final int index = i;
executorService.execute(new Runnable() {
@Override
public void run() {
try {
long startTime = System.currentTimeMillis();
Statement statement = connection.createStatement(); // get statement
StringBuilder sb = new StringBuilder();
sb.append("INSERT INTO " + tablePrefix + index + " VALUES");
Random rand = new Random();
for (int j = 1; j <= numOfRecordsPerTable; j++) {
sb.append("(" + (ts + j) + ", ");
sb.append(rand.nextInt(100) + ", ");
sb.append(rand.nextInt(100) + ", ");
sb.append(rand.nextInt(100) + ")");
}
statement.addBatch(sb.toString());
statement.executeBatch();
long endTime = System.currentTimeMillis();
System.out.println("Thread " + index + " takes " + (endTime - startTime) + " microseconds");
connection.commit();
statement.close();
} catch (Exception e) {
e.printStackTrace();
}
}
});
}
executorService.shutdown();
try {
executorService.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
} catch (InterruptedException e) {
e.printStackTrace();
}
Statement statement = connection.createStatement();
ResultSet rs = statement.executeQuery("select * from meters");
int num = 0;
while (rs.next()) {
num++;
}
assertEquals(numOfTables * numOfRecordsPerTable, num);
rs.close();
}
@After
public void close() throws Exception {
statement.close();
connection.close();
Thread.sleep(10);
}
}
\ No newline at end of file
......@@ -252,13 +252,15 @@ static void sdbConfirmForward(void *ahandle, void *param, int32_t code) {
int32_t processedCount = atomic_add_fetch_32(&pOper->processedCount, 1);
if (processedCount <= 1) {
if (pMsg != NULL) {
sdbDebug("app:%p:%p, waiting for confirm this operation, count:%d", pMsg->rpcMsg.ahandle, pMsg, processedCount);
sdbDebug("app:%p:%p, waiting for confirm this operation, count:%d result:%s", pMsg->rpcMsg.ahandle, pMsg,
processedCount, tstrerror(code));
}
return;
}
if (pMsg != NULL) {
sdbDebug("app:%p:%p, is confirmed and will do callback func", pMsg->rpcMsg.ahandle, pMsg);
sdbDebug("app:%p:%p, is confirmed and will do callback func, result:%s", pMsg->rpcMsg.ahandle, pMsg,
tstrerror(code));
}
if (pOper->cb != NULL) {
......
......@@ -2202,7 +2202,13 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order);
SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER;
while (tsdbNextDataBlock(pQueryHandle)) {
while (true) {
if (!tsdbNextDataBlock(pQueryHandle)) {
if (terrno != TSDB_CODE_SUCCESS) {
longjmp(pRuntimeEnv->env, terrno);
}
break;
}
summary->totalBlocks += 1;
if (IS_QUERY_KILLED(GET_QINFO_ADDR(pRuntimeEnv))) {
......@@ -3188,6 +3194,9 @@ static void setEnvBeforeReverseScan(SQueryRuntimeEnv *pRuntimeEnv, SQueryStatusI
// add ref for table
pRuntimeEnv->pSecQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &pQInfo->tableGroupInfo, pQInfo);
if (pRuntimeEnv->pSecQueryHandle == NULL) {
longjmp(pRuntimeEnv->env, terrno);
}
setQueryStatus(pQuery, QUERY_NOT_COMPLETED);
switchCtxOrder(pRuntimeEnv);
......@@ -3260,6 +3269,9 @@ void scanOneTableDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, TSKEY start) {
}
pRuntimeEnv->pSecQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &pQInfo->tableGroupInfo, pQInfo);
if (pRuntimeEnv->pSecQueryHandle == NULL) {
longjmp(pRuntimeEnv->env, terrno);
}
pRuntimeEnv->windowResInfo.curIndex = qstatus.windowIndex;
setQueryStatus(pQuery, QUERY_NOT_COMPLETED);
......@@ -3916,7 +3928,14 @@ void skipBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
TsdbQueryHandleT pQueryHandle = pRuntimeEnv->pQueryHandle;
SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER;
while (tsdbNextDataBlock(pQueryHandle)) {
while (true) {
if (!tsdbNextDataBlock(pQueryHandle)) {
if (terrno != TSDB_CODE_SUCCESS) {
longjmp(pRuntimeEnv->env, terrno);
}
break;
}
if (IS_QUERY_KILLED(GET_QINFO_ADDR(pRuntimeEnv))) {
finalizeQueryResult(pRuntimeEnv); // clean up allocated resource during query
longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED);
......@@ -3960,7 +3979,14 @@ static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv, TSKEY* start) {
STableQueryInfo *pTableQueryInfo = pQuery->current;
SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER;
while (tsdbNextDataBlock(pRuntimeEnv->pQueryHandle)) {
while (true) {
if (!tsdbNextDataBlock(pRuntimeEnv->pQueryHandle)) {
if (terrno != TSDB_CODE_SUCCESS) {
longjmp(pRuntimeEnv->env, terrno);
}
break;
}
tsdbRetrieveDataBlockInfo(pRuntimeEnv->pQueryHandle, &blockInfo);
if (QUERY_IS_ASC_QUERY(pQuery)) {
......@@ -4059,16 +4085,16 @@ static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv, TSKEY* start) {
return true;
}
static void setupQueryHandle(void* tsdb, SQInfo* pQInfo, bool isSTableQuery) {
static int32_t setupQueryHandle(void* tsdb, SQInfo* pQInfo, bool isSTableQuery) {
SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
SQuery *pQuery = pQInfo->runtimeEnv.pQuery;
if (onlyQueryTags(pQuery)) {
return;
return TSDB_CODE_SUCCESS;
}
if (isSTableQuery && (!QUERY_IS_INTERVAL_QUERY(pQuery)) && (!isFixedOutputQuery(pRuntimeEnv))) {
return;
return TSDB_CODE_SUCCESS;
}
STsdbQueryCond cond = {
......@@ -4090,6 +4116,7 @@ static void setupQueryHandle(void* tsdb, SQInfo* pQInfo, bool isSTableQuery) {
cond.twindow = pCheckInfo->win;
}
terrno = TSDB_CODE_SUCCESS;
if (isFirstLastRowQuery(pQuery)) {
pRuntimeEnv->pQueryHandle = tsdbQueryLastRow(tsdb, &cond, &pQInfo->tableGroupInfo, pQInfo);
} else if (isPointInterpoQuery(pQuery)) {
......@@ -4097,6 +4124,7 @@ static void setupQueryHandle(void* tsdb, SQInfo* pQInfo, bool isSTableQuery) {
} else {
pRuntimeEnv->pQueryHandle = tsdbQueryTables(tsdb, &cond, &pQInfo->tableGroupInfo, pQInfo);
}
return terrno;
}
static SFillColInfo* taosCreateFillColInfo(SQuery* pQuery) {
......@@ -4133,7 +4161,10 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo
setScanLimitationByResultBuffer(pQuery);
changeExecuteScanOrder(pQInfo, false);
setupQueryHandle(tsdb, pQInfo, isSTableQuery);
code = setupQueryHandle(tsdb, pQInfo, isSTableQuery);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
pQInfo->tsdb = tsdb;
pQInfo->vgId = vgId;
......@@ -4257,7 +4288,14 @@ static int64_t scanMultiTableDataBlocks(SQInfo *pQInfo) {
int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order);
while (tsdbNextDataBlock(pQueryHandle)) {
while (true) {
if (!tsdbNextDataBlock(pQueryHandle)) {
if (terrno != TSDB_CODE_SUCCESS) {
longjmp(pRuntimeEnv->env, terrno);
}
break;
}
summary->totalBlocks += 1;
if (IS_QUERY_KILLED(pQInfo)) {
......@@ -4338,6 +4376,9 @@ static bool multiTableMultioutputHelper(SQInfo *pQInfo, int32_t index) {
pRuntimeEnv->pQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &gp, pQInfo);
taosArrayDestroy(tx);
taosArrayDestroy(g1);
if (pRuntimeEnv->pQueryHandle == NULL) {
longjmp(pRuntimeEnv->env, terrno);
}
if (pRuntimeEnv->pTSBuf != NULL) {
if (pRuntimeEnv->cur.vgroupIndex == -1) {
......@@ -4405,6 +4446,12 @@ static void sequentialTableProcess(SQInfo *pQInfo) {
} else {
pRuntimeEnv->pQueryHandle = tsdbQueryRowsInExternalWindow(pQInfo->tsdb, &cond, &gp, pQInfo);
}
taosArrayDestroy(tx);
taosArrayDestroy(g1);
if (pRuntimeEnv->pQueryHandle == NULL) {
longjmp(pRuntimeEnv->env, terrno);
}
initCtxOutputBuf(pRuntimeEnv);
......@@ -4469,6 +4516,9 @@ static void sequentialTableProcess(SQInfo *pQInfo) {
pRuntimeEnv->pQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &gp, pQInfo);
taosArrayDestroy(g1);
taosArrayDestroy(tx);
if (pRuntimeEnv->pQueryHandle == NULL) {
longjmp(pRuntimeEnv->env, terrno);
}
SArray* s = tsdbGetQueriedTableList(pRuntimeEnv->pQueryHandle);
assert(taosArrayGetSize(s) >= 1);
......@@ -4664,6 +4714,9 @@ static void doSaveContext(SQInfo *pQInfo) {
pRuntimeEnv->prevGroupId = INT32_MIN;
pRuntimeEnv->pSecQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &pQInfo->tableGroupInfo, pQInfo);
if (pRuntimeEnv->pSecQueryHandle == NULL) {
longjmp(pRuntimeEnv->env, terrno);
}
setQueryStatus(pQuery, QUERY_NOT_COMPLETED);
switchCtxOrder(pRuntimeEnv);
......
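The qExecutor.c hunks above repeatedly rewrite while (tsdbNextDataBlock(...)) into a while (true) loop so that an error reported through terrno can escape with longjmp(pRuntimeEnv->env, terrno) instead of being silently treated as end-of-data. A minimal, self-contained sketch of how such a longjmp-based error path pairs with a setjmp recovery point; runScan, nextBlock, env, and errCode are placeholders, not the actual TDengine symbols:

#include <setjmp.h>
#include <stdio.h>

static jmp_buf env;        /* stands in for pRuntimeEnv->env */
static int errCode = 0;    /* stands in for the global terrno */

/* Pretend block iterator: returns 0 when there are no more blocks,
 * and may leave an error code behind, like tsdbNextDataBlock(). */
static int nextBlock(void) {
  errCode = 1;             /* simulate an I/O error on the first call */
  return 0;
}

static void runScan(void) {
  while (1) {
    if (!nextBlock()) {
      if (errCode != 0) {
        longjmp(env, errCode);   /* error: unwind to the setjmp point */
      }
      break;                     /* clean end of data */
    }
    /* ... process one data block ... */
  }
}

int main(void) {
  int code = setjmp(env);        /* the query driver establishes the recovery point */
  if (code != 0) {
    printf("scan aborted, code=%d\n", code);
    return 1;
  }
  runScan();
  printf("scan finished\n");
  return 0;
}

In the real code the setjmp site is where the query entry point initializes pRuntimeEnv->env; this sketch only mirrors the control flow.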
......@@ -179,7 +179,10 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab
pQueryHandle->outputCapacity = ((STsdbRepo*)tsdb)->config.maxRowsPerFileBlock;
pQueryHandle->allocSize = 0;
tsdbInitReadHelper(&pQueryHandle->rhelper, (STsdbRepo*) tsdb);
if (tsdbInitReadHelper(&pQueryHandle->rhelper, (STsdbRepo*) tsdb) != 0) {
free(pQueryHandle);
return NULL;
}
tsdbTakeMemSnapshot(pQueryHandle->pTsdb, &pQueryHandle->mem, &pQueryHandle->imem);
size_t sizeOfGroup = taosArrayGetSize(groupList->pGroupList);
......@@ -238,11 +241,11 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab
TsdbQueryHandleT tsdbQueryLastRow(TSDB_REPO_T *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupList, void* qinfo) {
STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, groupList, qinfo);
pQueryHandle->type = TSDB_QUERY_TYPE_LAST;
pQueryHandle->order = TSDB_ORDER_DESC;
changeQueryHandleForLastrowQuery(pQueryHandle);
if (pQueryHandle != NULL) {
pQueryHandle->type = TSDB_QUERY_TYPE_LAST;
pQueryHandle->order = TSDB_ORDER_DESC;
changeQueryHandleForLastrowQuery(pQueryHandle);
}
return pQueryHandle;
}
......@@ -264,9 +267,10 @@ SArray* tsdbGetQueriedTableList(TsdbQueryHandleT *pHandle) {
TsdbQueryHandleT tsdbQueryRowsInExternalWindow(TSDB_REPO_T *tsdb, STsdbQueryCond* pCond, STableGroupInfo *groupList, void* qinfo) {
STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, groupList, qinfo);
pQueryHandle->type = TSDB_QUERY_TYPE_EXTERNAL;
changeQueryHandleForInterpQuery(pQueryHandle);
if (pQueryHandle != NULL) {
pQueryHandle->type = TSDB_QUERY_TYPE_EXTERNAL;
changeQueryHandleForInterpQuery(pQueryHandle);
}
return pQueryHandle;
}
......@@ -1522,7 +1526,10 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) {
pSecQueryHandle->activeIndex = 0;
pSecQueryHandle->outputCapacity = ((STsdbRepo*)pSecQueryHandle->pTsdb)->config.maxRowsPerFileBlock;
tsdbInitReadHelper(&pSecQueryHandle->rhelper, (STsdbRepo*) pSecQueryHandle->pTsdb);
if (tsdbInitReadHelper(&pSecQueryHandle->rhelper, (STsdbRepo*) pSecQueryHandle->pTsdb) != 0) {
free(pSecQueryHandle);
return false;
}
tsdbTakeMemSnapshot(pSecQueryHandle->pTsdb, &pSecQueryHandle->mem, &pSecQueryHandle->imem);
// allocate buffer in order to load data blocks from file
......@@ -1606,7 +1613,9 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) {
}
// TODO: opt by consider the scan order
return doHasDataInBuffer(pQueryHandle);
bool ret = doHasDataInBuffer(pQueryHandle);
terrno = TSDB_CODE_SUCCESS;
return ret;
}
void changeQueryHandleForLastrowQuery(TsdbQueryHandleT pqHandle) {
......
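The tsdbRead.c hunks above establish a two-part convention: tsdbQueryTables() and the handles built on it may now return NULL with the reason left in terrno (when tsdbInitReadHelper() fails), and tsdbNextDataBlock() clears terrno on its normal in-buffer path, so a false return with a non-zero terrno means error rather than end-of-data. A self-contained sketch of that caller-side convention, with openHandle, hasNext, and fakeErrno as stand-ins rather than the real API:

#include <stdio.h>

/* Stand-ins for tsdbQueryTables()/tsdbNextDataBlock() and the global terrno;
 * none of these are the real TDengine symbols. */
static int fakeErrno = 0;

typedef struct { int remaining; } Handle;

static Handle *openHandle(Handle *storage) {
  /* the real call returns NULL and leaves the reason in terrno
   * when its read helper cannot be initialized */
  storage->remaining = 3;
  return storage;
}

static int hasNext(Handle *h) {
  /* returns 0 both at end of data and on error; fakeErrno tells the two
   * apart, mirroring how tsdbNextDataBlock() now clears terrno before
   * returning on the success path */
  fakeErrno = 0;
  return h->remaining-- > 0;
}

int main(void) {
  Handle storage;
  Handle *h = openHandle(&storage);
  if (h == NULL) {
    fprintf(stderr, "open failed, code=%d\n", fakeErrno);
    return 1;
  }
  while (hasNext(h)) {
    /* ... consume one data block ... */
  }
  if (fakeErrno != 0) {
    fprintf(stderr, "scan failed, code=%d\n", fakeErrno);
    return 1;
  }
  printf("scan finished cleanly\n");
  return 0;
}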
......@@ -84,8 +84,17 @@ class WorkerThread:
# Let us have a DB connection of our own
if (gConfig.per_thread_db_connection): # type: ignore
# print("connector_type = {}".format(gConfig.connector_type))
self._dbConn = DbConn.createNative() if (
gConfig.connector_type == 'native') else DbConn.createRest()
if gConfig.connector_type == 'native':
self._dbConn = DbConn.createNative()
elif gConfig.connector_type == 'rest':
self._dbConn = DbConn.createRest()
elif gConfig.connector_type == 'mixed':
if Dice.throw(2) == 0: # 1/2 chance
self._dbConn = DbConn.createNative()
else:
self._dbConn = DbConn.createRest()
else:
raise RuntimeError("Unexpected connector type: {}".format(gConfig.connector_type))
self._dbInUse = False # if "use db" was executed already
......@@ -130,22 +139,15 @@ class WorkerThread:
while True:
tc = self._tc # Thread Coordinator, the overall master
tc.crossStepBarrier() # shared barrier first, INCLUDING the last one
logger.debug(
"[TRD] Worker thread [{}] exited barrier...".format(
self._tid))
logger.debug("[TRD] Worker thread [{}] exited barrier...".format(self._tid))
self.crossStepGate() # then per-thread gate, after being tapped
logger.debug(
"[TRD] Worker thread [{}] exited step gate...".format(
self._tid))
logger.debug("[TRD] Worker thread [{}] exited step gate...".format(self._tid))
if not self._tc.isRunning():
logger.debug(
"[TRD] Thread Coordinator not running any more, worker thread now stopping...")
logger.debug("[TRD] Thread Coordinator not running any more, worker thread now stopping...")
break
# Fetch a task from the Thread Coordinator
logger.debug(
"[TRD] Worker thread [{}] about to fetch task".format(
self._tid))
logger.debug( "[TRD] Worker thread [{}] about to fetch task".format(self._tid))
task = tc.fetchTask()
# Execute such a task
......@@ -154,9 +156,7 @@ class WorkerThread:
self._tid, task.__class__.__name__))
task.execute(self)
tc.saveExecutedTask(task)
logger.debug(
"[TRD] Worker thread [{}] finished executing task".format(
self._tid))
logger.debug("[TRD] Worker thread [{}] finished executing task".format(self._tid))
self._dbInUse = False # there may be changes between steps
......@@ -255,101 +255,124 @@ class ThreadCoordinator:
self._runStatus = MainExec.STATUS_STOPPING
self._execStats.registerFailure("User Interruption")
def _runShouldEnd(self, transitionFailed, hasAbortedTask):
maxSteps = gConfig.max_steps # type: ignore
if self._curStep >= (maxSteps - 1): # maxStep==10, last curStep should be 9
return True
if self._runStatus != MainExec.STATUS_RUNNING:
return True
if transitionFailed:
return True
if hasAbortedTask:
return True
return False
def _hasAbortedTask(self): # from execution of previous step
for task in self._executedTasks:
if task.isAborted():
# print("Task aborted: {}".format(task))
# hasAbortedTask = True
return True
return False
def _releaseAllWorkerThreads(self, transitionFailed):
self._curStep += 1 # we are about to get into next step. TODO: race condition here!
# Now not all threads had time to go to sleep
logger.debug(
"--\r\n\n--> Step {} starts with main thread waking up".format(self._curStep))
# A new TE for the new step
self._te = None # set to empty first, to signal worker thread to stop
if not transitionFailed: # only if not failed
self._te = TaskExecutor(self._curStep)
logger.debug("[TRD] Main thread waking up at step {}, tapping worker threads".format(
self._curStep)) # Now not all threads had time to go to sleep
# Worker threads will wake up at this point, and each execute it's own task
self.tapAllThreads() # release all worker thread from their "gate"
def _syncAtBarrier(self):
# Now main thread (that's us) is ready to enter a step
# let other threads go past the pool barrier, but wait at the
# thread gate
logger.debug("[TRD] Main thread about to cross the barrier")
self.crossStepBarrier()
self._stepBarrier.reset() # Other worker threads should now be at the "gate"
logger.debug("[TRD] Main thread finished crossing the barrier")
def _doTransition(self):
transitionFailed = False
try:
sm = self._dbManager.getStateMachine()
logger.debug("[STT] starting transitions")
# at end of step, transition the DB state
sm.transition(self._executedTasks)
logger.debug("[STT] transition ended")
# Due to limitation (or maybe not) of the Python library,
# we cannot share connections across threads
if sm.hasDatabase():
for t in self._pool.threadList:
logger.debug("[DB] use db for all worker threads")
t.useDb()
# t.execSql("use db") # main thread executing "use
# db" on behalf of every worker thread
except taos.error.ProgrammingError as err:
if (err.msg == 'network unavailable'): # broken DB connection
logger.info("DB connection broken, execution failed")
traceback.print_stack()
transitionFailed = True
self._te = None # Not running any more
self._execStats.registerFailure("Broken DB Connection")
# continue # don't do that, need to tap all threads at
# end, and maybe signal them to stop
else:
raise
self.resetExecutedTasks() # clear the tasks after we are done
# Get ready for next step
logger.debug("<-- Step {} finished, trasition failed = {}".format(self._curStep, transitionFailed))
return transitionFailed
def run(self):
self._pool.createAndStartThreads(self)
# Coordinate all threads step by step
self._curStep = -1 # not started yet
maxSteps = gConfig.max_steps # type: ignore
self._execStats.startExec() # start the stop watch
transitionFailed = False
hasAbortedTask = False
while(self._curStep < maxSteps - 1 and
(not transitionFailed) and
(self._runStatus == MainExec.STATUS_RUNNING) and
(not hasAbortedTask)): # maxStep==10, last curStep should be 9
if not gConfig.debug:
# print this only if we are not in debug mode
while not self._runShouldEnd(transitionFailed, hasAbortedTask):
if not gConfig.debug: # print this only if we are not in debug mode
print(".", end="", flush=True)
logger.debug("[TRD] Main thread going to sleep")
# Now main thread (that's us) is ready to enter a step
# let other threads go past the pool barrier, but wait at the
# thread gate
self.crossStepBarrier()
self._stepBarrier.reset() # Other worker threads should now be at the "gate"
self._syncAtBarrier() # For now just cross the barrier
# At this point, all threads should be pass the overall "barrier" and before the per-thread "gate"
# We use this period to do house keeping work, when all worker
# threads are QUIET.
hasAbortedTask = False
for task in self._executedTasks:
if task.isAborted():
print("Task aborted: {}".format(task))
hasAbortedTask = True
break
if hasAbortedTask: # do transition only if tasks are error free
hasAbortedTask = self._hasAbortedTask() # from previous step
if hasAbortedTask:
logger.info("Aborted task encountered, exiting test program")
self._execStats.registerFailure("Aborted Task Encountered")
else:
try:
sm = self._dbManager.getStateMachine()
logger.debug("[STT] starting transitions")
# at end of step, transition the DB state
sm.transition(self._executedTasks)
logger.debug("[STT] transition ended")
# Due to limitation (or maybe not) of the Python library,
# we cannot share connections across threads
if sm.hasDatabase():
for t in self._pool.threadList:
logger.debug("[DB] use db for all worker threads")
t.useDb()
# t.execSql("use db") # main thread executing "use
# db" on behalf of every worker thread
except taos.error.ProgrammingError as err:
if (err.msg == 'network unavailable'): # broken DB connection
logger.info("DB connection broken, execution failed")
traceback.print_stack()
transitionFailed = True
self._te = None # Not running any more
self._execStats.registerFailure("Broken DB Connection")
# continue # don't do that, need to tap all threads at
# end, and maybe signal them to stop
else:
raise
# finally:
# pass
break # do transition only if tasks are error free
self.resetExecutedTasks() # clear the tasks after we are done
# Ending previous step
transitionFailed = self._doTransition() # To start, we end step -1 first
# Then we move on to the next step
self._releaseAllWorkerThreads(transitionFailed)
# Get ready for next step
logger.debug("<-- Step {} finished".format(self._curStep))
self._curStep += 1 # we are about to get into next step. TODO: race condition here!
# Now not all threads had time to go to sleep
logger.debug(
"\r\n\n--> Step {} starts with main thread waking up".format(self._curStep))
if hasAbortedTask or transitionFailed : # abnormal ending, workers waiting at "gate"
logger.debug("Abnormal ending of main thraed")
else: # regular ending, workers waiting at "barrier"
logger.debug("Regular ending, main thread waiting for all worker threads to stop...")
self._syncAtBarrier()
# A new TE for the new step
if not transitionFailed: # only if not failed
self._te = TaskExecutor(self._curStep)
logger.debug(
"[TRD] Main thread waking up at step {}, tapping worker threads".format(
self._curStep)) # Now not all threads had time to go to sleep
# Worker threads will wake up at this point, and each execute it's
# own task
self.tapAllThreads()
logger.debug("Main thread ready to finish up...")
if not transitionFailed: # only in regular situations
self.crossStepBarrier() # Cross it one last time, after all threads finish
self._stepBarrier.reset()
logger.debug("Main thread in exclusive zone...")
self._te = None # No more executor, time to end
logger.debug("Main thread tapping all threads one last time...")
self.tapAllThreads() # Let the threads run one last time
self._te = None # No more executor, time to end
logger.debug("Main thread tapping all threads one last time...")
self.tapAllThreads() # Let the threads run one last time
logger.debug("\r\n\n--> Main thread ready to finish up...")
logger.debug("Main thread joining all threads")
self._pool.joinAll() # Get all threads to finish
logger.info("\nAll worker threads finished")
......@@ -514,7 +537,7 @@ class LinearQueue():
class DbConn:
TYPE_NATIVE = "native-c"
TYPE_REST = "rest-api"
TYPE_REST = "rest-api"
TYPE_INVALID = "invalid"
@classmethod
......@@ -620,9 +643,13 @@ class DbConnRest(DbConn):
self.isOpen = False
def _doSql(self, sql):
r = requests.post(self._url,
data=sql,
auth=HTTPBasicAuth('root', 'taosdata'))
try:
r = requests.post(self._url,
data = sql,
auth = HTTPBasicAuth('root', 'taosdata'))
except:
print("REST API Failure (TODO: more info here)")
raise
rj = r.json()
# Sanity check for the "Json Result"
if ('status' not in rj):
......@@ -717,7 +744,7 @@ class MyTDSql:
class DbConnNative(DbConn):
def __init__(self):
super().__init__()
self._type = self.TYPE_REST
self._type = self.TYPE_NATIVE
self._conn = None
self._cursor = None
......@@ -2254,8 +2281,9 @@ class ClientManager:
def sigIntHandler(self, signalNumber, frame):
if self._status != MainExec.STATUS_RUNNING:
print("Ignoring repeated SIGINT...")
return # do nothing if it's already not running
print("Repeated SIGINT received, forced exit...")
# return # do nothing if it's already not running
sys.exit(-1)
self._status = MainExec.STATUS_STOPPING # immediately set our status
print("Terminating program...")
......@@ -2394,6 +2422,27 @@ def main():
'''))
# parser.add_argument('-a', '--auto-start-service', action='store_true',
# help='Automatically start/stop the TDengine service (default: false)')
# parser.add_argument('-c', '--connector-type', action='store', default='native', type=str,
# help='Connector type to use: native, rest, or mixed (default: native)')
# parser.add_argument('-d', '--debug', action='store_true',
# help='Turn on DEBUG mode for more logging (default: false)')
# parser.add_argument('-e', '--run-tdengine', action='store_true',
# help='Run TDengine service in foreground (default: false)')
# parser.add_argument('-l', '--larger-data', action='store_true',
# help='Write larger amount of data during write operations (default: false)')
# parser.add_argument('-p', '--per-thread-db-connection', action='store_true',
# help='Use a single shared db connection (default: false)')
# parser.add_argument('-r', '--record-ops', action='store_true',
# help='Use a pair of always-fsynced files to record operations performing + performed, for power-off tests (default: false)')
# parser.add_argument('-s', '--max-steps', action='store', default=1000, type=int,
# help='Maximum number of steps to run (default: 100)')
# parser.add_argument('-t', '--num-threads', action='store', default=5, type=int,
# help='Number of threads to run (default: 10)')
# parser.add_argument('-x', '--continue-on-exception', action='store_true',
# help='Continue execution after encountering unexpected/disallowed errors/exceptions (default: false)')
parser.add_argument(
'-a',
'--auto-start-service',
......
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
import os.path
import subprocess
from util.log import *
class TDSimClient:
def __init__(self):
self.testCluster = False
self.cfgDict = {
"numOfLogLines": "100000000",
"numOfThreadsPerCore": "2.0",
"locale": "en_US.UTF-8",
"charset": "UTF-8",
"asyncLog": "0",
"anyIp": "0",
"sdbDebugFlag": "135",
"rpcDebugFlag": "135",
"tmrDebugFlag": "131",
"cDebugFlag": "135",
"udebugFlag": "135",
"jnidebugFlag": "135",
"qdebugFlag": "135",
}
def init(self, path):
self.__init__()
self.path = path
def getLogDir(self):
self.logDir = "%s/sim/psim/log" % (self.path)
return self.logDir
def getCfgDir(self):
self.cfgDir = "%s/sim/psim/cfg" % (self.path)
return self.cfgDir
def setTestCluster(self, value):
self.testCluster = value
def addExtraCfg(self, option, value):
self.cfgDict.update({option: value})
def cfg(self, option, value):
cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath)
if os.system(cmd) != 0:
tdLog.exit(cmd)
def deploy(self):
self.logDir = "%s/sim/psim/log" % (self.path)
self.cfgDir = "%s/sim/psim/cfg" % (self.path)
self.cfgPath = "%s/sim/psim/cfg/taos.cfg" % (self.path)
cmd = "rm -rf " + self.logDir
if os.system(cmd) != 0:
tdLog.exit(cmd)
cmd = "mkdir -p " + self.logDir
if os.system(cmd) != 0:
tdLog.exit(cmd)
cmd = "rm -rf " + self.cfgDir
if os.system(cmd) != 0:
tdLog.exit(cmd)
cmd = "mkdir -p " + self.cfgDir
if os.system(cmd) != 0:
tdLog.exit(cmd)
cmd = "touch " + self.cfgPath
if os.system(cmd) != 0:
tdLog.exit(cmd)
if self.testCluster:
self.cfg("masterIp", "192.168.0.1")
self.cfg("secondIp", "192.168.0.2")
self.cfg("logDir", self.logDir)
for key, value in self.cfgDict.items():
self.cfg(key, value)
tdLog.debug("psim is deployed and configured by %s" % (self.cfgPath))
class TDDnode:
def __init__(self, index):
self.index = index
self.running = 0
self.deployed = 0
self.testCluster = False
self.valgrind = 0
def init(self, path):
self.path = path
def setTestCluster(self, value):
self.testCluster = value
def setValgrind(self, value):
self.valgrind = value
def getDataSize(self):
totalSize = 0
if (self.deployed == 1):
for dirpath, dirnames, filenames in os.walk(self.dataDir):
for f in filenames:
fp = os.path.join(dirpath, f)
if not os.path.islink(fp):
totalSize = totalSize + os.path.getsize(fp)
return totalSize
def deploy(self):
self.logDir = "%s/sim/dnode%d/log" % (self.path, self.index)
self.dataDir = "%s/sim/dnode%d/data" % (self.path, self.index)
self.cfgDir = "%s/sim/dnode%d/cfg" % (self.path, self.index)
self.cfgPath = "%s/sim/dnode%d/cfg/taos.cfg" % (
self.path, self.index)
cmd = "rm -rf " + self.dataDir
if os.system(cmd) != 0:
tdLog.exit(cmd)
cmd = "rm -rf " + self.logDir
if os.system(cmd) != 0:
tdLog.exit(cmd)
cmd = "rm -rf " + self.cfgDir
if os.system(cmd) != 0:
tdLog.exit(cmd)
cmd = "mkdir -p " + self.dataDir
if os.system(cmd) != 0:
tdLog.exit(cmd)
cmd = "mkdir -p " + self.logDir
if os.system(cmd) != 0:
tdLog.exit(cmd)
cmd = "mkdir -p " + self.cfgDir
if os.system(cmd) != 0:
tdLog.exit(cmd)
cmd = "touch " + self.cfgPath
if os.system(cmd) != 0:
tdLog.exit(cmd)
if self.testCluster:
self.startIP()
if self.testCluster:
self.cfg("masterIp", "192.168.0.1")
self.cfg("secondIp", "192.168.0.2")
self.cfg("publicIp", "192.168.0.%d" % (self.index))
self.cfg("internalIp", "192.168.0.%d" % (self.index))
self.cfg("privateIp", "192.168.0.%d" % (self.index))
self.cfg("dataDir", self.dataDir)
self.cfg("logDir", self.logDir)
self.cfg("numOfLogLines", "100000000")
self.cfg("mnodeEqualVnodeNum", "0")
self.cfg("walLevel", "1")
self.cfg("statusInterval", "1")
self.cfg("numOfTotalVnodes", "64")
self.cfg("numOfMnodes", "3")
self.cfg("numOfThreadsPerCore", "2.0")
self.cfg("monitor", "0")
self.cfg("maxVnodeConnections", "30000")
self.cfg("maxMgmtConnections", "30000")
self.cfg("maxMeterConnections", "30000")
self.cfg("maxShellConns", "30000")
self.cfg("locale", "en_US.UTF-8")
self.cfg("charset", "UTF-8")
self.cfg("asyncLog", "0")
self.cfg("anyIp", "0")
self.cfg("dDebugFlag", "135")
self.cfg("mDebugFlag", "135")
self.cfg("sdbDebugFlag", "135")
self.cfg("rpcDebugFlag", "135")
self.cfg("tmrDebugFlag", "131")
self.cfg("cDebugFlag", "135")
self.cfg("httpDebugFlag", "135")
self.cfg("monitorDebugFlag", "135")
self.cfg("udebugFlag", "135")
self.cfg("jnidebugFlag", "135")
self.cfg("qdebugFlag", "135")
self.deployed = 1
tdLog.debug(
"dnode:%d is deployed and configured by %s" %
(self.index, self.cfgPath))
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root)-len("/build/bin")]
break
return buildPath
def start(self):
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosd not found!")
else:
tdLog.info("taosd found in %s" % buildPath)
binPath = buildPath + "/build/bin/taosd"
if self.deployed == 0:
tdLog.exit("dnode:%d is not deployed" % (self.index))
if self.valgrind == 0:
cmd = "nohup %s -c %s --random-file-fail-factor 0 > /dev/null 2>&1 & " % (
binPath, self.cfgDir)
else:
valgrindCmdline = "valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"
cmd = "nohup %s %s -c %s 2>&1 & " % (
valgrindCmdline, binPath, self.cfgDir)
print(cmd)
if os.system(cmd) != 0:
tdLog.exit(cmd)
self.running = 1
tdLog.debug("dnode:%d is running with %s " % (self.index, cmd))
tdLog.debug("wait 5 seconds for the dnode:%d to start." % (self.index))
time.sleep(5)
def stop(self):
if self.valgrind == 0:
toBeKilled = "taosd"
else:
toBeKilled = "valgrind.bin"
if self.running != 0:
psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
while(processID):
killCmd = "kill -INT %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
for port in range(6030, 6041):
fuserCmd = "fuser -k -n tcp %d" % port
os.system(fuserCmd)
if self.valgrind:
time.sleep(2)
self.running = 0
tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index))
def forcestop(self):
if self.valgrind == 0:
toBeKilled = "taosd"
else:
toBeKilled = "valgrind.bin"
if self.running != 0:
psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
while(processID):
killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
for port in range(6030, 6041):
fuserCmd = "fuser -k -n tcp %d" % port
os.system(fuserCmd)
if self.valgrind:
time.sleep(2)
self.running = 0
tdLog.debug("dnode:%d is stopped by kill -KILL" % (self.index))
def startIP(self):
cmd = "sudo ifconfig lo:%d 192.168.0.%d up" % (self.index, self.index)
if os.system(cmd) != 0:
tdLog.exit(cmd)
def stopIP(self):
cmd = "sudo ifconfig lo:%d 192.168.0.%d down" % (
self.index, self.index)
if os.system(cmd) != 0:
tdLog.exit(cmd)
def cfg(self, option, value):
cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath)
if os.system(cmd) != 0:
tdLog.exit(cmd)
def getDnodeRootDir(self, index):
dnodeRootDir = "%s/sim/psim/dnode%d" % (self.path, index)
return dnodeRootDir
def getDnodesRootDir(self):
dnodesRootDir = "%s/sim/psim" % (self.path)
return dnodesRootDir
class TDDnodes:
def __init__(self):
self.dnodes = []
self.dnodes.append(TDDnode(1))
self.dnodes.append(TDDnode(2))
self.dnodes.append(TDDnode(3))
self.dnodes.append(TDDnode(4))
self.dnodes.append(TDDnode(5))
self.dnodes.append(TDDnode(6))
self.dnodes.append(TDDnode(7))
self.dnodes.append(TDDnode(8))
self.dnodes.append(TDDnode(9))
self.dnodes.append(TDDnode(10))
self.simDeployed = False
def init(self, path):
psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
while(processID):
killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
while(processID):
killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
binPath = os.path.dirname(os.path.realpath(__file__))
binPath = binPath + "/../../../debug/"
tdLog.debug("binPath %s" % (binPath))
binPath = os.path.realpath(binPath)
tdLog.debug("binPath real path %s" % (binPath))
# cmd = "sudo cp %s/build/lib/libtaos.so /usr/local/lib/taos/" % (binPath)
# tdLog.debug(cmd)
# os.system(cmd)
# cmd = "sudo cp %s/build/bin/taos /usr/local/bin/taos/" % (binPath)
# if os.system(cmd) != 0 :
# tdLog.exit(cmd)
# tdLog.debug("execute %s" % (cmd))
# cmd = "sudo cp %s/build/bin/taosd /usr/local/bin/taos/" % (binPath)
# if os.system(cmd) != 0 :
# tdLog.exit(cmd)
# tdLog.debug("execute %s" % (cmd))
if path == "":
# self.path = os.path.expanduser('~')
self.path = os.path.abspath(binPath + "../../")
else:
self.path = os.path.realpath(path)
for i in range(len(self.dnodes)):
self.dnodes[i].init(self.path)
self.sim = TDSimClient()
self.sim.init(self.path)
def setTestCluster(self, value):
self.testCluster = value
def setValgrind(self, value):
self.valgrind = value
def deploy(self, index):
self.sim.setTestCluster(self.testCluster)
if (self.simDeployed == False):
self.sim.deploy()
self.simDeployed = True
self.check(index)
self.dnodes[index - 1].setTestCluster(self.testCluster)
self.dnodes[index - 1].setValgrind(self.valgrind)
self.dnodes[index - 1].deploy()
def cfg(self, index, option, value):
self.check(index)
self.dnodes[index - 1].cfg(option, value)
def start(self, index):
self.check(index)
self.dnodes[index - 1].start()
def stop(self, index):
self.check(index)
self.dnodes[index - 1].stop()
def getDataSize(self, index):
self.check(index)
return self.dnodes[index - 1].getDataSize()
def forcestop(self, index):
self.check(index)
self.dnodes[index - 1].forcestop()
def startIP(self, index):
self.check(index)
if self.testCluster:
self.dnodes[index - 1].startIP()
def stopIP(self, index):
self.check(index)
if self.dnodes[index - 1].testCluster:
self.dnodes[index - 1].stopIP()
def check(self, index):
if index < 1 or index > 10:
tdLog.exit("index:%d should on a scale of [1, 10]" % (index))
def stopAll(self):
tdLog.info("stop all dnodes")
for i in range(len(self.dnodes)):
self.dnodes[i].stop()
psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
if processID:
cmd = "sudo systemctl stop taosd"
os.system(cmd)
# if os.system(cmd) != 0 :
# tdLog.exit(cmd)
psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
while(processID):
killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
while(processID):
killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
# if os.system(cmd) != 0 :
# tdLog.exit(cmd)
def getDnodesRootDir(self):
dnodesRootDir = "%s/sim" % (self.path)
return dnodesRootDir
def getSimCfgPath(self):
return self.sim.getCfgDir()
def getSimLogPath(self):
return self.sim.getLogDir()
def addSimExtraCfg(self, option, value):
self.sim.addExtraCfg(option, value)
tdDnodes = TDDnodes()
......@@ -235,12 +235,12 @@ class TDDnode:
tdLog.exit("dnode:%d is not deployed" % (self.index))
if self.valgrind == 0:
cmd = "nohup %s -c %s > /dev/null 2>&1 & " % (
cmd = "nohup %s -c %s --alloc-random-fail --random-file-fail-factor 5 > /dev/null 2>&1 & " % (
binPath, self.cfgDir)
else:
valgrindCmdline = "valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"
cmd = "nohup %s %s -c %s --random-file-fail-factor 5 2>&1 & " % (
cmd = "nohup %s %s -c %s 2>&1 & " % (
valgrindCmdline, binPath, self.cfgDir)
print(cmd)
......
#!/bin/bash
# if [ $# != 4 || $# != 5 ]; then
# echo "argument list need input : "
# echo " -n nodeName"
# echo " -s start/stop"
# echo " -c clear"
# exit 1
# fi
NODE_NAME=
EXEC_OPTON=
CLEAR_OPTION="false"
while getopts "n:s:u:x:ct" arg
do
case $arg in
n)
NODE_NAME=$OPTARG
;;
s)
EXEC_OPTON=$OPTARG
;;
c)
CLEAR_OPTION="clear"
;;
t)
SHELL_OPTION="true"
;;
u)
USERS=$OPTARG
;;
x)
SIGNAL=$OPTARG
;;
?)
echo "unkown argument"
;;
esac
done
SCRIPT_DIR=`dirname $0`
cd $SCRIPT_DIR/../
SCRIPT_DIR=`pwd`
IN_TDINTERNAL="community"
if [[ "$SCRIPT_DIR" == *"$IN_TDINTERNAL"* ]]; then
cd ../../..
else
cd ../../
fi
TAOS_DIR=`pwd`
TAOSD_DIR=`find . -name "taosd"|grep bin|head -n1`
if [[ "$TAOSD_DIR" == *"$IN_TDINTERNAL"* ]]; then
BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' --fields=2,3`
else
BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' --fields=2`
fi
BUILD_DIR=$TAOS_DIR/$BIN_DIR/build
SIM_DIR=$TAOS_DIR/sim
NODE_DIR=$SIM_DIR/$NODE_NAME
EXE_DIR=$BUILD_DIR/bin
CFG_DIR=$NODE_DIR/cfg
LOG_DIR=$NODE_DIR/log
DATA_DIR=$NODE_DIR/data
MGMT_DIR=$NODE_DIR/data/mgmt
TSDB_DIR=$NODE_DIR/data/tsdb
TAOS_CFG=$NODE_DIR/cfg/taos.cfg
echo ------------ $EXEC_OPTON $NODE_NAME
TAOS_FLAG=$SIM_DIR/tsim/flag
if [ -f "$TAOS_FLAG" ]; then
EXE_DIR=/usr/local/bin/taos
fi
if [ "$CLEAR_OPTION" = "clear" ]; then
echo rm -rf $MGMT_DIR $TSDB_DIR
rm -rf $TSDB_DIR
rm -rf $MGMT_DIR
fi
if [ "$EXEC_OPTON" = "start" ]; then
echo "ExcuteCmd:" $EXE_DIR/taosd -c $CFG_DIR
if [ "$SHELL_OPTION" = "true" ]; then
nohup valgrind --log-file=${LOG_DIR}/valgrind.log --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes $EXE_DIR/taosd -c $CFG_DIR > /dev/null 2>&1 &
else
nohup $EXE_DIR/taosd -c $CFG_DIR --random-file-fail-factor 0 > /dev/null 2>&1 &
fi
else
#relative path
RCFG_DIR=sim/$NODE_NAME/cfg
PID=`ps -ef|grep taosd | grep $RCFG_DIR | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
if [ "$SIGNAL" = "SIGINT" ]; then
echo try to kill by signal SIGINT
kill -SIGINT $PID
else
echo try to kill by signal SIGKILL
kill -9 $PID
fi
sleep 1
PID=`ps -ef|grep taosd | grep $RCFG_DIR | grep -v grep | awk '{print $2}'`
done
fi
......@@ -90,7 +90,7 @@ if [ "$EXEC_OPTON" = "start" ]; then
if [ "$SHELL_OPTION" = "true" ]; then
nohup valgrind --log-file=${LOG_DIR}/valgrind.log --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes $EXE_DIR/taosd -c $CFG_DIR > /dev/null 2>&1 &
else
nohup $EXE_DIR/taosd -c $CFG_DIR --random-file-fail-factor 5 > /dev/null 2>&1 &
nohup $EXE_DIR/taosd -c $CFG_DIR --alloc-random-fail --random-file-fail-factor 5 > /dev/null 2>&1 &
fi
else
......
......@@ -52,9 +52,9 @@ system sh/cfg.sh -n dnode1 -c qdebugFlag -v 131
system sh/cfg.sh -n dnode2 -c qdebugFlag -v 131
system sh/cfg.sh -n dnode3 -c qdebugFlag -v 131
system sh/cfg.sh -n dnode1 -c cDebugFlag -v 131
system sh/cfg.sh -n dnode2 -c cDebugFlag -v 131
system sh/cfg.sh -n dnode3 -c cDebugFlag -v 131
system sh/cfg.sh -n dnode1 -c cDebugFlag -v 135
system sh/cfg.sh -n dnode2 -c cDebugFlag -v 135
system sh/cfg.sh -n dnode3 -c cDebugFlag -v 135
system sh/cfg.sh -n dnode1 -c udebugFlag -v 131
system sh/cfg.sh -n dnode2 -c udebugFlag -v 131
......@@ -64,6 +64,10 @@ system sh/cfg.sh -n dnode1 -c wdebugFlag -v 131
system sh/cfg.sh -n dnode2 -c wdebugFlag -v 131
system sh/cfg.sh -n dnode3 -c wdebugFlag -v 131
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 1000000
system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 1000000
system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 1000000
print ============== deploy
system sh/exec.sh -n dnode1 -s start
......
......@@ -15,6 +15,7 @@
#define _DEFAULT_SOURCE
#include "os.h"
#include "taoserror.h"
#include "taos.h"
#include "tulog.h"
#include "ttime.h"
......@@ -154,7 +155,7 @@ void *threadFunc(void *param) {
TAOS_RES *pSql = taos_query(con, qstr);
code = taos_errno(pSql);
if (code != 0) {
pError("failed to create table %s%d, reason:%s", stableName, t, taos_errstr(con));
pError("failed to create table %s%d, reason:%s", stableName, t, tstrerror(code));
}
taos_free_result(pSql);
}
......