Commit 32f57e5b authored by sangshuduo

Merge branch 'develop' into feature/sangshuduo/TD-2771-python-taosdemo

-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 IF (CMAKE_VERSION VERSION_LESS 3.0)
 PROJECT(TDengine CXX)
 SET(PROJECT_VERSION_MAJOR "${LIB_MAJOR_VERSION}")
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 ADD_SUBDIRECTORY(zlib-1.2.11)
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 # MQTT-C build options
 option(MQTT_C_OpenSSL_SUPPORT "Build MQTT-C with OpenSSL support?" OFF)
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 IF (TD_WINDOWS)
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 IF (TD_WINDOWS)
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 IF (TD_WINDOWS)
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 IF (TD_WINDOWS)
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 IF (TD_WINDOWS)
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 IF (TD_WINDOWS)
......
@@ -147,8 +147,8 @@ done
 #echo "verType=${verType} interactiveFqdn=${interactiveFqdn}"
-function kill_taosd() {
-  pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}')
+function kill_process() {
+  pid=$(ps -ef | grep "$1" | grep -v "grep" | awk '{print $2}')
   if [ -n "$pid" ]; then
     ${csudo} kill -9 $pid || :
   fi
@@ -168,7 +168,10 @@ function install_main_path() {
     if [ "$verMode" == "cluster" ]; then
         ${csudo} mkdir -p ${nginx_dir}
     fi
-    ${csudo} cp ${script_dir}/email ${install_main_dir}/ ||:
+    if [[ -e ${script_dir}/email ]]; then
+        ${csudo} cp ${script_dir}/email ${install_main_dir}/ ||:
+    fi
 }
 function install_bin() {
@@ -680,7 +683,7 @@ function install_service() {
         install_service_on_sysvinit
     else
         # must manual stop taosd
-        kill_taosd
+        kill_process taosd
     fi
 }
@@ -749,9 +752,22 @@ function update_TDengine() {
         elif ((${service_mod}==1)); then
             ${csudo} service taosd stop || :
         else
-            kill_taosd
+            kill_process taosd
         fi
         sleep 1
     fi
+    if [ "$verMode" == "cluster" ]; then
+      if pidof nginx &> /dev/null; then
+        if ((${service_mod}==0)); then
+            ${csudo} systemctl stop nginxd || :
+        elif ((${service_mod}==1)); then
+            ${csudo} service nginxd stop || :
+        else
+            kill_process nginx
+        fi
+        sleep 1
+      fi
+    fi
     install_main_path
......
@@ -146,8 +146,8 @@ done
 #echo "verType=${verType} interactiveFqdn=${interactiveFqdn}"
-function kill_powerd() {
-  pid=$(ps -ef | grep "powerd" | grep -v "grep" | awk '{print $2}')
+function kill_process() {
+  pid=$(ps -ef | grep "$1" | grep -v "grep" | awk '{print $2}')
   if [ -n "$pid" ]; then
     ${csudo} kill -9 $pid || :
   fi
@@ -652,7 +652,7 @@ function install_service() {
         install_service_on_sysvinit
     else
         # must manual stop powerd
-        kill_powerd
+        kill_process powerd
     fi
 }
@@ -721,9 +721,21 @@ function update_PowerDB() {
        elif ((${service_mod}==1)); then
            ${csudo} service powerd stop || :
        else
-           kill_powerd
+           kill_process powerd
        fi
        sleep 1
    fi
+   if [ "$verMode" == "cluster" ]; then
+     if pidof nginx &> /dev/null; then
+       if ((${service_mod}==0)); then
+           ${csudo} systemctl stop nginxd || :
+       elif ((${service_mod}==1)); then
+           ${csudo} service nginxd stop || :
+       else
+           kill_process nginx
+       fi
+       sleep 1
+     fi
+   fi
    install_main_path
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 # Base compile
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/mnode/inc)
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 INCLUDE_DIRECTORIES(inc)
......
@@ -273,7 +273,8 @@ void tscQueueAsyncError(void(*fp), void *param, int32_t code) {
   taosScheduleTask(tscQhandle, &schedMsg);
 }
-void tscAsyncResultOnError(SSqlObj *pSql) {
+static void tscAsyncResultCallback(SSchedMsg *pMsg) {
+  SSqlObj* pSql = pMsg->ahandle;
   if (pSql == NULL || pSql->signature != pSql) {
     tscDebug("%p SqlObj is freed, not add into queue async res", pSql);
     return;
@@ -291,6 +292,16 @@ void tscAsyncResultOnError(SSqlObj *pSql) {
   (*pSql->fp)(pSql->param, pSql, pRes->code);
 }
+void tscAsyncResultOnError(SSqlObj* pSql) {
+  SSchedMsg schedMsg = {0};
+  schedMsg.fp = tscAsyncResultCallback;
+  schedMsg.ahandle = pSql;
+  schedMsg.thandle = (void *)1;
+  schedMsg.msg = 0;
+  taosScheduleTask(tscQhandle, &schedMsg);
+}
 int tscSendMsgToServer(SSqlObj *pSql);
 void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
......
@@ -609,7 +609,7 @@ static int32_t tscEstimateQueryMsgSize(SSqlObj *pSql, int32_t clauseIndex) {
   }
   return MIN_QUERY_MSG_PKT_SIZE + minMsgSize() + sizeof(SQueryTableMsg) + srcColListSize + exprSize + tsBufSize +
-         tableSerialize + sqlLen + 4096;
+         tableSerialize + sqlLen + 4096 + pQueryInfo->bufLen;
 }
 static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char *pMsg) {
......
@@ -95,11 +95,21 @@ static bool subAndCheckDone(SSqlObj *pSql, SSqlObj *pParentSql, int idx) {
   pthread_mutex_lock(&subState->mutex);
+  bool done = allSubqueryDone(pParentSql);
+  if (done) {
+    tscDebug("%p subquery:%p,%d all subs already done", pParentSql, pSql, idx);
+    pthread_mutex_unlock(&subState->mutex);
+    return false;
+  }
   tscDebug("%p subquery:%p,%d state set to 1", pParentSql, pSql, idx);
   subState->states[idx] = 1;
-  bool done = allSubqueryDone(pParentSql);
+  done = allSubqueryDone(pParentSql);
   pthread_mutex_unlock(&subState->mutex);
@@ -2245,7 +2255,9 @@ static void tscAbortFurtherRetryRetrieval(SRetrieveSupport *trsupport, TAOS_RES
  * current query failed, and the retry count is less than the available
  * count, retry query clear previous retrieved data, then launch a new sub query
  */
-static int32_t tscReissueSubquery(SRetrieveSupport *oriTrs, SSqlObj *pSql, int32_t code) {
+static int32_t tscReissueSubquery(SRetrieveSupport *oriTrs, SSqlObj *pSql, int32_t code, int32_t *sent) {
+  *sent = 0;
   SRetrieveSupport *trsupport = malloc(sizeof(SRetrieveSupport));
   if (trsupport == NULL) {
     return TSDB_CODE_TSC_OUT_OF_MEMORY;
@@ -2277,21 +2289,28 @@ static int32_t tscReissueSubquery(SRetrieveSupport *oriTrs, SSqlObj *pSql, int32
   SSqlObj *pNew = tscCreateSTableSubquery(trsupport->pParentSql, trsupport, pSql);
   if (pNew == NULL) {
     tscError("%p sub:%p failed to create new subquery due to error:%s, abort retry, vgId:%d, orderOfSub:%d",
-             trsupport->pParentSql, pSql, tstrerror(terrno), pVgroup->vgId, trsupport->subqueryIndex);
+             oriTrs->pParentSql, pSql, tstrerror(terrno), pVgroup->vgId, oriTrs->subqueryIndex);
     pParentSql->res.code = terrno;
-    trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY;
+    oriTrs->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY;
+    tfree(trsupport);
     return pParentSql->res.code;
   }
   int32_t ret = tscProcessSql(pNew);
+  *sent = 1;
   // if failed to process sql, let following code handle the pSql
   if (ret == TSDB_CODE_SUCCESS) {
+    tscFreeRetrieveSup(pSql);
     taos_free_result(pSql);
     return ret;
   } else {
+    pSql->pSubs[trsupport->subqueryIndex] = pSql;
+    tscFreeRetrieveSup(pNew);
+    taos_free_result(pNew);
     return ret;
   }
 }
@@ -2328,7 +2347,10 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
            subqueryIndex, tstrerror(pParentSql->res.code));
   } else {
     if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY && pParentSql->res.code == TSDB_CODE_SUCCESS) {
-      if (tscReissueSubquery(trsupport, pSql, numOfRows) == TSDB_CODE_SUCCESS) {
+      int32_t sent = 0;
+      tscReissueSubquery(trsupport, pSql, numOfRows, &sent);
+      if (sent) {
         return;
       }
     } else { // reach the maximum retry count, abort
@@ -2450,7 +2472,6 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
   SRetrieveSupport *trsupport = (SRetrieveSupport *)param;
   if (pSql->param == NULL || param == NULL) {
     tscDebug("%p already freed in dnodecallback", pSql);
-    assert(pSql->res.code == TSDB_CODE_TSC_QUERY_CANCELLED);
     return;
   }
@@ -2482,7 +2503,10 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
     if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY) {
       tscError("%p sub:%p failed code:%s, retry:%d", pParentSql, pSql, tstrerror(numOfRows), trsupport->numOfRetry);
-      if (tscReissueSubquery(trsupport, pSql, numOfRows) == TSDB_CODE_SUCCESS) {
+      int32_t sent = 0;
+      tscReissueSubquery(trsupport, pSql, numOfRows, &sent);
+      if (sent) {
         return;
       }
     } else {
@@ -2604,7 +2628,11 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
     if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY) {
       tscError("%p sub:%p failed code:%s, retry:%d", pParentSql, pSql, tstrerror(code), trsupport->numOfRetry);
-      if (tscReissueSubquery(trsupport, pSql, code) == TSDB_CODE_SUCCESS) {
+      int32_t sent = 0;
+      tscReissueSubquery(trsupport, pSql, code, &sent);
+      if (sent) {
         return;
       }
     } else {
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 FIND_PATH(HEADER_GTEST_INCLUDE_DIR gtest.h /usr/include/gtest /usr/local/include/gtest)
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 INCLUDE_DIRECTORIES(inc)
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 IF (TD_LINUX_64)
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 IF (TD_LINUX_64)
......
Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
This program is free software: you can use, redistribute, and/or modify
it under the terms of the GNU Affero General Public License, version 3
or later ("AGPL"), as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
# TDengine python client interface
\ No newline at end of file
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="taos",
version="2.0.4",
author="Taosdata Inc.",
author_email="support@taosdata.com",
description="TDengine python client package",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/pypa/sampleproject",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: MacOS X",
],
)
from .connection import TDengineConnection
from .cursor import TDengineCursor
# Globals
apilevel = '2.0.4'
threadsafety = 0
paramstyle = 'pyformat'
__all__ = ['connection', 'cursor']
def connect(*args, **kwargs):
""" Function to return a TDengine connector object
Currently supported keyword parameters:
@dsn: Data source name as string
@user: Username as string(optional)
@password: Password as string(optional)
@host: Hostname(optional)
@database: Database name(optional)
@rtype: TDengineConnector
"""
return TDengineConnection(*args, **kwargs)
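A minimal usage sketch of the connect() helper above, for reference; it is not part of this commit and assumes a TDengine server reachable on localhost with the default credentials:

import taos

# Keyword arguments are forwarded to TDengineConnection.config(); all are optional.
conn = taos.connect(host="127.0.0.1", user="root", password="taosdata")
cursor = conn.cursor()
cursor.execute("show databases")
for row in cursor.fetchall():
    print(row)
cursor.close()
conn.close()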
import ctypes
from .constants import FieldType
from .error import *
import math
import datetime
def _convert_millisecond_to_datetime(milli):
return datetime.datetime.fromtimestamp(milli/1000.0)
def _convert_microsecond_to_datetime(micro):
return datetime.datetime.fromtimestamp(micro/1000000.0)
def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False):
"""Function to convert C bool row to python row
"""
_timestamp_converter = _convert_millisecond_to_datetime
if micro:
_timestamp_converter = _convert_microsecond_to_datetime
if num_of_rows > 0:
return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]))
else:
return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]))
def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False):
"""Function to convert C bool row to python row
"""
if num_of_rows > 0:
return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)] ]
else:
return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[:abs(num_of_rows)] ]
def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False):
"""Function to convert C tinyint row to python row
"""
if num_of_rows > 0:
return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)] ]
else:
return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)] ]
def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False):
"""Function to convert C smallint row to python row
"""
if num_of_rows > 0:
return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)]]
else:
return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)] ]
def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False):
"""Function to convert C int row to python row
"""
if num_of_rows > 0:
return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)] ]
else:
return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)] ]
def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False):
"""Function to convert C bigint row to python row
"""
if num_of_rows > 0:
return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)] ]
else:
return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)] ]
def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False):
"""Function to convert C float row to python row
"""
if num_of_rows > 0:
return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)] ]
else:
return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)] ]
def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False):
"""Function to convert C double row to python row
"""
if num_of_rows > 0:
return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)] ]
else:
return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)] ]
def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False):
"""Function to convert C binary row to python row
"""
assert(nbytes is not None)
if num_of_rows > 0:
return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]]
else:
return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]]
def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False):
"""Function to convert C nchar row to python row
"""
assert(nbytes is not None)
res=[]
for i in range(abs(num_of_rows)):
try:
if num_of_rows >= 0:
tmpstr = ctypes.c_char_p(data)
res.append( tmpstr.value.decode() )
else:
res.append( (ctypes.cast(data+nbytes*i, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value )
except ValueError:
res.append(None)
return res
def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, micro=False):
"""Function to convert C binary row to python row
"""
assert(nbytes is not None)
res=[]
if num_of_rows > 0:
for i in range(abs(num_of_rows)):
try:
rbyte=ctypes.cast(data+nbytes*i,ctypes.POINTER(ctypes.c_short))[:1].pop()
tmpstr = ctypes.c_char_p(data+nbytes*i+2)
res.append( tmpstr.value.decode()[0:rbyte] )
except ValueError:
res.append(None)
else:
for i in range(abs(num_of_rows)):
try:
rbyte=ctypes.cast(data+nbytes*i,ctypes.POINTER(ctypes.c_short))[:1].pop()
tmpstr = ctypes.c_char_p(data+nbytes*i+2)
res.append( tmpstr.value.decode()[0:rbyte] )
except ValueError:
res.append(None)
return res
def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, micro=False):
"""Function to convert C nchar row to python row
"""
assert(nbytes is not None)
res=[]
if num_of_rows >= 0:
for i in range(abs(num_of_rows)):
try:
tmpstr = ctypes.c_char_p(data+nbytes*i+2)
res.append( tmpstr.value.decode() )
except ValueError:
res.append(None)
else:
for i in range(abs(num_of_rows)):
try:
res.append( (ctypes.cast(data+nbytes*i+2, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value )
except ValueError:
res.append(None)
return res
_CONVERT_FUNC = {
FieldType.C_BOOL: _crow_bool_to_python,
FieldType.C_TINYINT : _crow_tinyint_to_python,
FieldType.C_SMALLINT : _crow_smallint_to_python,
FieldType.C_INT : _crow_int_to_python,
FieldType.C_BIGINT : _crow_bigint_to_python,
FieldType.C_FLOAT : _crow_float_to_python,
FieldType.C_DOUBLE : _crow_double_to_python,
FieldType.C_BINARY: _crow_binary_to_python,
FieldType.C_TIMESTAMP : _crow_timestamp_to_python,
FieldType.C_NCHAR : _crow_nchar_to_python
}
_CONVERT_FUNC_BLOCK = {
FieldType.C_BOOL: _crow_bool_to_python,
FieldType.C_TINYINT : _crow_tinyint_to_python,
FieldType.C_SMALLINT : _crow_smallint_to_python,
FieldType.C_INT : _crow_int_to_python,
FieldType.C_BIGINT : _crow_bigint_to_python,
FieldType.C_FLOAT : _crow_float_to_python,
FieldType.C_DOUBLE : _crow_double_to_python,
FieldType.C_BINARY: _crow_binary_to_python_block,
FieldType.C_TIMESTAMP : _crow_timestamp_to_python,
FieldType.C_NCHAR : _crow_nchar_to_python_block
}
# Corresponding TAOS_FIELD structure in C
class TaosField(ctypes.Structure):
_fields_ = [('name', ctypes.c_char * 65),
('type', ctypes.c_char),
('bytes', ctypes.c_short)]
# C interface class
class CTaosInterface(object):
libtaos = ctypes.CDLL('libtaos.dylib')
libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField)
libtaos.taos_init.restype = None
libtaos.taos_connect.restype = ctypes.c_void_p
#libtaos.taos_use_result.restype = ctypes.c_void_p
libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p)
libtaos.taos_errstr.restype = ctypes.c_char_p
libtaos.taos_subscribe.restype = ctypes.c_void_p
libtaos.taos_consume.restype = ctypes.c_void_p
libtaos.taos_fetch_lengths.restype = ctypes.c_void_p
libtaos.taos_free_result.restype = None
libtaos.taos_errno.restype = ctypes.c_int
libtaos.taos_query.restype = ctypes.POINTER(ctypes.c_void_p)
def __init__(self, config=None):
'''
Function to initialize the class
@host : str, hostname to connect
@user : str, username to connect to server
@password : str, password to connect to server
@db : str, default db to use when log in
@config : str, config directory
@rtype : None
'''
if config is None:
self._config = ctypes.c_char_p(None)
else:
try:
self._config = ctypes.c_char_p(config.encode('utf-8'))
except AttributeError:
raise AttributeError("config is expected as a str")
if config != None:
CTaosInterface.libtaos.taos_options(3, self._config)
CTaosInterface.libtaos.taos_init()
@property
def config(self):
""" Get current config
"""
return self._config
def connect(self, host=None, user="root", password="taosdata", db=None, port=0):
'''
Function to connect to server
@rtype: c_void_p, TDengine handle
'''
# host
try:
_host = ctypes.c_char_p(host.encode(
"utf-8")) if host != None else ctypes.c_char_p(None)
except AttributeError:
raise AttributeError("host is expected as a str")
# user
try:
_user = ctypes.c_char_p(user.encode("utf-8"))
except AttributeError:
raise AttributeError("user is expected as a str")
# password
try:
_password = ctypes.c_char_p(password.encode("utf-8"))
except AttributeError:
raise AttributeError("password is expected as a str")
# db
try:
_db = ctypes.c_char_p(
db.encode("utf-8")) if db != None else ctypes.c_char_p(None)
except AttributeError:
raise AttributeError("db is expected as a str")
# port
try:
_port = ctypes.c_int(port)
except TypeError:
raise TypeError("port is expected as an int")
connection = ctypes.c_void_p(CTaosInterface.libtaos.taos_connect(
_host, _user, _password, _db, _port))
if connection.value == None:
print('connect to TDengine failed')
raise ConnectionError("connect to TDengine failed")
# sys.exit(1)
#else:
# print('connect to TDengine success')
return connection
@staticmethod
def close(connection):
'''Close the TDengine handle
'''
CTaosInterface.libtaos.taos_close(connection)
#print('connection is closed')
@staticmethod
def query(connection, sql):
'''Run SQL
@sql: str, sql string to run
@rtype: 0 on success and -1 on failure
'''
try:
return CTaosInterface.libtaos.taos_query(connection, ctypes.c_char_p(sql.encode('utf-8')))
except AttributeError:
raise AttributeError("sql is expected as a string")
# finally:
# CTaosInterface.libtaos.close(connection)
@staticmethod
def affectedRows(result):
"""The affected rows after runing query
"""
return CTaosInterface.libtaos.taos_affected_rows(result)
@staticmethod
def subscribe(connection, restart, topic, sql, interval):
"""Create a subscription
@restart boolean,
@sql string, sql statement for data query, must be a 'select' statement.
@topic string, name of this subscription
"""
return ctypes.c_void_p(CTaosInterface.libtaos.taos_subscribe(
connection,
1 if restart else 0,
ctypes.c_char_p(topic.encode('utf-8')),
ctypes.c_char_p(sql.encode('utf-8')),
None,
None,
interval))
@staticmethod
def consume(sub):
"""Consume data of a subscription
"""
result = ctypes.c_void_p(CTaosInterface.libtaos.taos_consume(sub))
fields = []
pfields = CTaosInterface.fetchFields(result)
for i in range(CTaosInterface.libtaos.taos_num_fields(result)):
fields.append({'name': pfields[i].name.decode('utf-8'),
'bytes': pfields[i].bytes,
'type': ord(pfields[i].type)})
return result, fields
@staticmethod
def unsubscribe(sub, keepProgress):
"""Cancel a subscription
"""
CTaosInterface.libtaos.taos_unsubscribe(sub, 1 if keepProgress else 0)
@staticmethod
def useResult(result):
'''Use result after calling self.query
'''
fields = []
pfields = CTaosInterface.fetchFields(result)
for i in range(CTaosInterface.fieldsCount(result)):
fields.append({'name': pfields[i].name.decode('utf-8'),
'bytes': pfields[i].bytes,
'type': ord(pfields[i].type)})
return fields
@staticmethod
def fetchBlock(result, fields):
pblock = ctypes.c_void_p(0)
num_of_rows = CTaosInterface.libtaos.taos_fetch_block(
result, ctypes.byref(pblock))
if num_of_rows == 0:
return None, 0
isMicro = (CTaosInterface.libtaos.taos_result_precision(result) == FieldType.C_TIMESTAMP_MICRO)
blocks = [None] * len(fields)
fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result)
fieldLen = [ele for ele in ctypes.cast(fieldL, ctypes.POINTER(ctypes.c_int))[:len(fields)]]
for i in range(len(fields)):
data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i]
if fields[i]['type'] not in _CONVERT_FUNC_BLOCK:
raise DatabaseError("Invalid data type returned from database")
blocks[i] = _CONVERT_FUNC_BLOCK[fields[i]['type']](data, num_of_rows, fieldLen[i], isMicro)
return blocks, abs(num_of_rows)
@staticmethod
def fetchRow(result, fields):
pblock = ctypes.c_void_p(0)
pblock = CTaosInterface.libtaos.taos_fetch_row(result)
if pblock :
num_of_rows = 1
isMicro = (CTaosInterface.libtaos.taos_result_precision(result) == FieldType.C_TIMESTAMP_MICRO)
blocks = [None] * len(fields)
fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result)
fieldLen = [ele for ele in ctypes.cast(fieldL, ctypes.POINTER(ctypes.c_int))[:len(fields)]]
for i in range(len(fields)):
data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i]
if fields[i]['type'] not in _CONVERT_FUNC:
raise DatabaseError("Invalid data type returned from database")
if data is None:
blocks[i] = [None]
else:
blocks[i] = _CONVERT_FUNC[fields[i]['type']](data, num_of_rows, fieldLen[i], isMicro)
else:
return None, 0
return blocks, abs(num_of_rows)
@staticmethod
def freeResult(result):
CTaosInterface.libtaos.taos_free_result(result)
result.value = None
@staticmethod
def fieldsCount(result):
return CTaosInterface.libtaos.taos_field_count(result)
@staticmethod
def fetchFields(result):
return CTaosInterface.libtaos.taos_fetch_fields(result)
# @staticmethod
# def fetchRow(result, fields):
# l = []
# row = CTaosInterface.libtaos.taos_fetch_row(result)
# if not row:
# return None
# for i in range(len(fields)):
# l.append(CTaosInterface.getDataValue(
# row[i], fields[i]['type'], fields[i]['bytes']))
# return tuple(l)
# @staticmethod
# def getDataValue(data, dtype, byte):
# '''
# '''
# if not data:
# return None
# if (dtype == CTaosInterface.TSDB_DATA_TYPE_BOOL):
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[0]
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TINYINT):
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[0]
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_SMALLINT):
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[0]
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT):
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0]
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT):
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0]
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_FLOAT):
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0]
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE):
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[0]
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY):
# return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00')
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP):
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0]
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR):
# return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00')
@staticmethod
def errno(result):
"""Return the error number.
"""
return CTaosInterface.libtaos.taos_errno(result)
@staticmethod
def errStr(result):
"""Return the error styring
"""
return CTaosInterface.libtaos.taos_errstr(result).decode('utf-8')
if __name__ == '__main__':
cinter = CTaosInterface()
conn = cinter.connect()
result = cinter.query(conn, 'show databases')
print('Query Affected rows: {}'.format(cinter.affectedRows(result)))
fields = CTaosInterface.useResult(result)
data, num_of_rows = CTaosInterface.fetchBlock(result, fields)
print(data)
cinter.freeResult(result)
cinter.close(conn)
from .cursor import TDengineCursor
from .subscription import TDengineSubscription
from .cinterface import CTaosInterface
class TDengineConnection(object):
""" TDengine connection object
"""
def __init__(self, *args, **kwargs):
self._conn = None
self._host = None
self._user = "root"
self._password = "taosdata"
self._database = None
self._port = 0
self._config = None
self._chandle = None
self.config(**kwargs)
def config(self, **kwargs):
# host
if 'host' in kwargs:
self._host = kwargs['host']
# user
if 'user' in kwargs:
self._user = kwargs['user']
# password
if 'password' in kwargs:
self._password = kwargs['password']
# database
if 'database' in kwargs:
self._database = kwargs['database']
# port
if 'port' in kwargs:
self._port = kwargs['port']
# config
if 'config' in kwargs:
self._config = kwargs['config']
self._chandle = CTaosInterface(self._config)
self._conn = self._chandle.connect(self._host, self._user, self._password, self._database, self._port)
def close(self):
"""Close current connection.
"""
return CTaosInterface.close(self._conn)
def subscribe(self, restart, topic, sql, interval):
"""Create a subscription.
"""
if self._conn is None:
return None
sub = CTaosInterface.subscribe(self._conn, restart, topic, sql, interval)
return TDengineSubscription(sub)
def cursor(self):
"""Return a new Cursor object using the connection.
"""
return TDengineCursor(self)
def commit(self):
"""Commit any pending transaction to the database.
Since TDengine does not support transactions, this method is a no-op.
"""
pass
def rollback(self):
"""Void functionality
"""
pass
def clear_result_set(self):
"""Clear unused result set on this connection.
"""
pass
if __name__ == "__main__":
conn = TDengineConnection(host='192.168.1.107')
conn.close()
print("Hello world")
\ No newline at end of file
"""Constants in TDengine python
"""
from .dbapi import *
class FieldType(object):
"""TDengine Field Types
"""
# type_code
C_NULL = 0
C_BOOL = 1
C_TINYINT = 2
C_SMALLINT = 3
C_INT = 4
C_BIGINT = 5
C_FLOAT = 6
C_DOUBLE = 7
C_BINARY = 8
C_TIMESTAMP = 9
C_NCHAR = 10
# NULL value definition
# NOTE: These values should change according to C definition in tsdb.h
C_BOOL_NULL = 0x02
C_TINYINT_NULL = -128
C_SMALLINT_NULL = -32768
C_INT_NULL = -2147483648
C_BIGINT_NULL = -9223372036854775808
C_FLOAT_NULL = float('nan')
C_DOUBLE_NULL = float('nan')
C_BINARY_NULL = bytearray([int('0xff', 16)])
# Timestamp precision definition
C_TIMESTAMP_MILLI = 0
C_TIMESTAMP_MICRO = 1
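As a reference for how the NULL sentinels above are applied, here is a minimal sketch that mirrors _crow_int_to_python() in cinterface.py; the sample values are hypothetical and the snippet is not part of this commit:

from taos.constants import FieldType

def ints_to_python(raw_values):
    # Cells equal to the INT NULL sentinel are reported as None.
    return [None if v == FieldType.C_INT_NULL else v for v in raw_values]

print(ints_to_python([1, FieldType.C_INT_NULL, 42]))  # [1, None, 42]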
from .cinterface import CTaosInterface
from .error import *
from .constants import FieldType
import threading
# querySeqNum = 0
class TDengineCursor(object):
"""Database cursor which is used to manage the context of a fetch operation.
Attributes:
.description: Read-only attribute consists of 7-item sequences:
> name (mandatory)
> type_code (mandatory)
> display_size
> internal_size
> precision
> scale
> null_ok
This attribute will be None for operations that do not return rows or
if the cursor has not had an operation invoked via the .execute*() method yet.
.rowcount:This read-only attribute specifies the number of rows that the last
.execute*() produced (for DQL statements like SELECT) or affected
"""
def __init__(self, connection=None):
self._description = []
self._rowcount = -1
self._connection = None
self._result = None
self._fields = None
self._block = None
self._block_rows = -1
self._block_iter = 0
self._affected_rows = 0
self._logfile = ""
self._threadId = threading.get_ident()
if connection is not None:
self._connection = connection
def __iter__(self):
return self
def __next__(self):
if self._result is None or self._fields is None:
raise OperationalError("Invalid use of fetch iterator")
if self._block_rows <= self._block_iter:
block, self._block_rows = CTaosInterface.fetchRow(
self._result, self._fields)
if self._block_rows == 0:
raise StopIteration
self._block = list(map(tuple, zip(*block)))
self._block_iter = 0
data = self._block[self._block_iter]
self._block_iter += 1
return data
@property
def description(self):
"""Return the description of the object.
"""
return self._description
@property
def rowcount(self):
"""Return the rowcount of the object
"""
return self._rowcount
@property
def affected_rows(self):
"""Return the rowcount of insertion
"""
return self._affected_rows
def callproc(self, procname, *args):
"""Call a stored database procedure with the given name.
Void functionality since no stored procedures.
"""
pass
def log(self, logfile):
self._logfile = logfile
def close(self):
"""Close the cursor.
"""
if self._connection is None:
return False
self._reset_result()
self._connection = None
return True
def execute(self, operation, params=None):
"""Prepare and execute a database operation (query or command).
"""
# if threading.get_ident() != self._threadId:
# info ="Cursor execute:Thread ID not match,creater:"+str(self._threadId)+" caller:"+str(threading.get_ident())
# raise OperationalError(info)
# print(info)
# return None
if not operation:
return None
if not self._connection:
# TODO : change the exception raised here
raise ProgrammingError("Cursor is not connected")
self._reset_result()
stmt = operation
if params is not None:
pass
# global querySeqNum
# querySeqNum += 1
# localSeqNum = querySeqNum # avoid raice condition
# print(" >> Exec Query ({}): {}".format(localSeqNum, str(stmt)))
self._result = CTaosInterface.query(self._connection._conn, stmt)
# print(" << Query ({}) Exec Done".format(localSeqNum))
if (self._logfile):
with open(self._logfile, "a") as logfile:
logfile.write("%s;\n" % operation)
errno = CTaosInterface.libtaos.taos_errno(self._result)
if errno == 0:
if CTaosInterface.fieldsCount(self._result) == 0:
self._affected_rows += CTaosInterface.affectedRows(
self._result )
return CTaosInterface.affectedRows(self._result )
else:
self._fields = CTaosInterface.useResult(
self._result)
return self._handle_result()
else:
raise ProgrammingError(
CTaosInterface.errStr(
self._result), errno)
def executemany(self, operation, seq_of_parameters):
"""Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters.
"""
pass
def fetchone(self):
"""Fetch the next row of a query result set, returning a single sequence, or None when no more data is available.
"""
pass
def fetchmany(self):
pass
def istype(self, col, dataType):
if (dataType.upper() == "BOOL"):
if (self._description[col][1] == FieldType.C_BOOL):
return True
if (dataType.upper() == "TINYINT"):
if (self._description[col][1] == FieldType.C_TINYINT):
return True
if (dataType.upper() == "INT"):
if (self._description[col][1] == FieldType.C_INT):
return True
if (dataType.upper() == "BIGINT"):
if (self._description[col][1] == FieldType.C_INT):
return True
if (dataType.upper() == "FLOAT"):
if (self._description[col][1] == FieldType.C_FLOAT):
return True
if (dataType.upper() == "DOUBLE"):
if (self._description[col][1] == FieldType.C_DOUBLE):
return True
if (dataType.upper() == "BINARY"):
if (self._description[col][1] == FieldType.C_BINARY):
return True
if (dataType.upper() == "TIMESTAMP"):
if (self._description[col][1] == FieldType.C_TIMESTAMP):
return True
if (dataType.upper() == "NCHAR"):
if (self._description[col][1] == FieldType.C_NCHAR):
return True
return False
def fetchall_row(self):
"""Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation.
"""
if self._result is None or self._fields is None:
raise OperationalError("Invalid use of fetchall")
buffer = [[] for i in range(len(self._fields))]
self._rowcount = 0
while True:
block, num_of_fields = CTaosInterface.fetchRow(self._result, self._fields)
errno = CTaosInterface.libtaos.taos_errno(self._result)
if errno != 0:
raise ProgrammingError(CTaosInterface.errStr(self._result), errno)
if num_of_fields == 0:
break
self._rowcount += num_of_fields
for i in range(len(self._fields)):
buffer[i].extend(block[i])
return list(map(tuple, zip(*buffer)))
def fetchall(self):
if self._result is None or self._fields is None:
raise OperationalError("Invalid use of fetchall")
buffer = [[] for i in range(len(self._fields))]
self._rowcount = 0
while True:
block, num_of_fields = CTaosInterface.fetchBlock(self._result, self._fields)
errno = CTaosInterface.libtaos.taos_errno(self._result)
if errno != 0:
raise ProgrammingError(CTaosInterface.errStr(self._result), errno)
if num_of_fields == 0: break
self._rowcount += num_of_fields
for i in range(len(self._fields)):
buffer[i].extend(block[i])
return list(map(tuple, zip(*buffer)))
def nextset(self):
"""
"""
pass
def setinputsize(self, sizes):
pass
def setutputsize(self, size, column=None):
pass
def _reset_result(self):
"""Reset the result to unused version.
"""
self._description = []
self._rowcount = -1
if self._result is not None:
CTaosInterface.freeResult(self._result)
self._result = None
self._fields = None
self._block = None
self._block_rows = -1
self._block_iter = 0
self._affected_rows = 0
def _handle_result(self):
"""Handle the return result from query.
"""
# if threading.get_ident() != self._threadId:
# info = "Cursor handleresult:Thread ID not match,creater:"+str(self._threadId)+" caller:"+str(threading.get_ident())
# raise OperationalError(info)
# print(info)
# return None
self._description = []
for ele in self._fields:
self._description.append(
(ele['name'], ele['type'], None, None, None, None, False))
return self._result
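For reference, a minimal sketch of the two fetch paths exposed by TDengineCursor above: iterating the cursor pulls rows one at a time through taos_fetch_row, while fetchall() drains the result block by block through taos_fetch_block. The snippet is not part of this commit and assumes a running server with an existing table test.weather:

import taos

conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", database="test")
cursor = conn.cursor()

# Row-at-a-time iteration via __next__() / taos_fetch_row.
cursor.execute("select * from weather limit 5")
for row in cursor:
    print(row)

# Bulk retrieval via fetchall() / taos_fetch_block.
cursor.execute("select count(*) from weather")
print(cursor.fetchall())

cursor.close()
conn.close()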
"""Type Objects and Constructors.
"""
import time
import datetime
class DBAPITypeObject(object):
def __init__(self, *values):
self.values = values
def __com__(self, other):
if other in self.values:
return 0
if other < self.values:
return 1
else:
return -1
Date = datetime.date
Time = datetime.time
Timestamp = datetime.datetime
def DataFromTicks(ticks):
return Date(*time.localtime(ticks)[:3])
def TimeFromTicks(ticks):
return Time(*time.localtime(ticks)[3:6])
def TimestampFromTicks(ticks):
return Timestamp(*time.localtime(ticks)[:6])
Binary = bytes
# STRING = DBAPITypeObject(*constants.FieldType.get_string_types())
# BINARY = DBAPITypeObject(*constants.FieldType.get_binary_types())
# NUMBER = BAPITypeObject(*constants.FieldType.get_number_types())
# DATETIME = DBAPITypeObject(*constants.FieldType.get_timestamp_types())
# ROWID = DBAPITypeObject()
\ No newline at end of file
"""Python exceptions
"""
class Error(Exception):
def __init__(self, msg=None, errno=None):
self.msg = msg
self._full_msg = self.msg
self.errno = errno
def __str__(self):
return self._full_msg
class Warning(Exception):
"""Exception raised for important warnings like data truncations while inserting.
"""
pass
class InterfaceError(Error):
"""Exception raised for errors that are related to the database interface rather than the database itself.
"""
pass
class DatabaseError(Error):
"""Exception raised for errors that are related to the database.
"""
pass
class DataError(DatabaseError):
"""Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range.
"""
pass
class OperationalError(DatabaseError):
"""Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer
"""
pass
class IntegrityError(DatabaseError):
"""Exception raised when the relational integrity of the database is affected.
"""
pass
class InternalError(DatabaseError):
"""Exception raised when the database encounters an internal error.
"""
pass
class ProgrammingError(DatabaseError):
"""Exception raised for programming errors.
"""
pass
class NotSupportedError(DatabaseError):
"""Exception raised in case a method or database API was used which is not supported by the database,.
"""
pass
\ No newline at end of file
from .cinterface import CTaosInterface
from .error import *
class TDengineSubscription(object):
"""TDengine subscription object
"""
def __init__(self, sub):
self._sub = sub
def consume(self):
"""Consume rows of a subscription
"""
if self._sub is None:
raise OperationalError("Invalid use of consume")
result, fields = CTaosInterface.consume(self._sub)
buffer = [[] for i in range(len(fields))]
while True:
block, num_of_fields = CTaosInterface.fetchBlock(result, fields)
if num_of_fields == 0: break
for i in range(len(fields)):
buffer[i].extend(block[i])
self.fields = fields
return list(map(tuple, zip(*buffer)))
def close(self, keepProgress = True):
"""Close the Subscription.
"""
if self._sub is None:
return False
CTaosInterface.unsubscribe(self._sub, keepProgress)
return True
if __name__ == '__main__':
from .connection import TDengineConnection
conn = TDengineConnection(host="127.0.0.1", user="root", password="taosdata", database="test")
# Generate a cursor object to run SQL commands
sub = conn.subscribe(True, "test", "select * from meters;", 1000)
for i in range(0,10):
data = sub.consume()
for d in data:
print(d)
sub.close()
conn.close()
\ No newline at end of file
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 INCLUDE_DIRECTORIES(inc)
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 LIST(APPEND CQTEST_SRC ./cqtest.c)
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc)
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 ADD_SUBDIRECTORY(shell)
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc)
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc)
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc)
......
@@ -8,6 +8,7 @@
   "thread_count": 4,
   "thread_count_create_tbl": 1,
   "result_file": "./insert_res.txt",
+  "confirm_parameter_prompt": "no",
   "databases": [{
     "dbinfo": {
       "name": "db",
......
@@ -5,6 +5,7 @@
   "port": 6030,
   "user": "root",
   "password": "taosdata",
+  "confirm_parameter_prompt": "yes",
   "databases": "db01",
   "specified_table_query":
     {"query_interval":1, "concurrent":1,
......
@@ -181,6 +181,7 @@ typedef struct SArguments_S {
   char * sqlFile;
   bool   use_metric;
   bool   insert_only;
+  bool   answer_yes;
   char * output_file;
   int    mode;
   char * datatype[MAX_NUM_DATATYPE + 1];
@@ -430,13 +431,14 @@ typedef struct curlMemInfo_S {
   {0, 'w', "length_of_chartype", 0, "The length of data_type 'BINARY' or 'NCHAR'. Default is 16", 4},
   {0, 'l', "num_of_cols_per_record", 0, "The number of columns per record. Default is 10.", 4},
   {0, 'T', "num_of_threads", 0, "The number of threads. Default is 10.", 4},
   // {0, 'r', "num_of_records_per_req", 0, "The number of records per request. Default is 100.", 4},
   {0, 't', "num_of_tables", 0, "The number of tables. Default is 10000.", 4},
   {0, 'n', "num_of_records_per_table", 0, "The number of records per table. Default is 10000.", 4},
   {0, 'x', 0, 0, "Not insert only flag.", 4},
+  {0, 'y', 0, 0, "Default input yes for prompt.", 4},
   {0, 'O', "disorderRatio", 0, "Insert mode--0: In order, > 0: disorder ratio. Default is in order.", 4},
   {0, 'R', "disorderRang", 0, "Out of order data's range, ms, default is 1000.", 4},
   //{0, 'D', "delete database", 0, "if elete database if exists. 0: no, 1: yes, default is 1", 5},
   {0}};
 /* Parse a single option. */
@@ -529,6 +531,8 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
       break;
     case 'x':
       arguments->insert_only = false;
+    case 'y':
+      arguments->answer_yes = true;
       break;
     case 'c':
       if (wordexp(arg, &full_path, 0) != 0) {
@@ -644,6 +648,7 @@ SArguments g_args = {NULL,
   NULL,            // sqlFile
   false,           // use_metric
   true,            // insert_only
+  false,           // answer_yes;
   "./output.txt",  // output_file
   0,               // mode : sync or async
   {
@@ -2535,6 +2540,22 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
   } else {
     printf("failed to read json, threads2 not found");
     goto PARSE_OVER;
+  }
+
+  cJSON *answerPrompt = cJSON_GetObjectItem(root, "confirm_parameter_prompt"); // yes, no,
+  if (answerPrompt && answerPrompt->type == cJSON_String && answerPrompt->valuestring != NULL) {
+    if (0 == strncasecmp(answerPrompt->valuestring, "yes", 3)) {
+      g_args.answer_yes = false;
+    } else if (0 == strncasecmp(answerPrompt->valuestring, "no", 2)) {
+      g_args.answer_yes = true;
+    } else {
+      g_args.answer_yes = false;
+    }
+  } else if (!answerPrompt) {
+    g_args.answer_yes = false;
+  } else {
+    printf("failed to read json, confirm_parameter_prompt not found");
+    goto PARSE_OVER;
   }
   cJSON* dbs = cJSON_GetObjectItem(root, "databases");
@@ -3052,6 +3073,22 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
     strncpy(g_queryInfo.password, "taosdata", MAX_DB_NAME_SIZE);;
   }
+  cJSON *answerPrompt = cJSON_GetObjectItem(root, "confirm_parameter_prompt"); // yes, no,
+  if (answerPrompt && answerPrompt->type == cJSON_String && answerPrompt->valuestring != NULL) {
+    if (0 == strncasecmp(answerPrompt->valuestring, "yes", 3)) {
+      g_args.answer_yes = false;
+    } else if (0 == strncasecmp(answerPrompt->valuestring, "no", 2)) {
+      g_args.answer_yes = true;
+    } else {
+      g_args.answer_yes = false;
+    }
+  } else if (!answerPrompt) {
+    g_args.answer_yes = false;
+  } else {
+    printf("failed to read json, confirm_parameter_prompt not found");
+    goto PARSE_OVER;
+  }
   cJSON* dbs = cJSON_GetObjectItem(root, "databases");
   if (dbs && dbs->type == cJSON_String && dbs->valuestring != NULL) {
     strncpy(g_queryInfo.dbName, dbs->valuestring, MAX_DB_NAME_SIZE);
@@ -4289,9 +4326,11 @@ int insertTestProcess() {
   printfInsertMeta();
   printfInsertMetaToFile(g_fpOfInsertResult);
-  printf("Press enter key to continue\n\n");
-  (void)getchar();
+  if (!g_args.answer_yes) {
+    printf("Press enter key to continue\n\n");
+    (void)getchar();
+  }
   init_rand_data();
   // create database and super tables
@@ -4469,9 +4508,12 @@
   }
   printfQueryMeta();
-  printf("Press enter key to continue\n\n");
-  (void)getchar();
+  if (!g_args.answer_yes) {
+    printf("Press enter key to continue\n\n");
+    (void)getchar();
+  }
   printfQuerySystemInfo(taos);
   pthread_t *pids = NULL;
@@ -4724,8 +4766,10 @@ void *superSubscribeProcess(void *sarg) {
 int subscribeTestProcess() {
   printfQueryMeta();
-  printf("Press enter key to continue\n\n");
-  (void)getchar();
+  if (!g_args.answer_yes) {
+    printf("Press enter key to continue\n\n");
+    (void)getchar();
+  }
   TAOS * taos = NULL;
   taos_init();
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc)
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc)
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 IF (TD_LINUX)
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 AUX_SOURCE_DIRECTORY(. SRC)
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 AUX_SOURCE_DIRECTORY(. SRC)
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 INCLUDE_DIRECTORIES(.)
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 AUX_SOURCE_DIRECTORY(. SRC)
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 AUX_SOURCE_DIRECTORY(. SRC)
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 ADD_SUBDIRECTORY(monitor)
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/zlib-1.2.11/inc)
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 INCLUDE_DIRECTORIES(inc)
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 INCLUDE_DIRECTORIES(inc)
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/tsdb/inc)
......
@@ -1569,8 +1569,10 @@ static void stddev_dst_function(SQLFunctionCtx *pCtx) {
     avg = p->avg;
   } else { // todo opt performance by using iterator since the timestamp lsit is matched with the output result
     SResPair* p = bsearch(&pCtx->startTs, resList->pData, len, sizeof(SResPair), tsCompare);
-    assert(p != NULL);
+    if (p == NULL) {
+      return;
+    }
     avg = p->avg;
   }
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 FIND_PATH(HEADER_GTEST_INCLUDE_DIR gtest.h /usr/include/gtest /usr/local/include/gtest)
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 INCLUDE_DIRECTORIES(inc)
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/rpc/inc)
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 INCLUDE_DIRECTORIES(inc)
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 IF (TD_LINUX)
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 INCLUDE_DIRECTORIES(inc)
......
@@ -227,6 +227,7 @@ void *tsdbFreeFS(STsdbFS *pfs) {
     pfs->metaCache = NULL;
     pfs->cstatus = tsdbFreeFSStatus(pfs->cstatus);
     pthread_rwlock_destroy(&(pfs->lock));
+    free(pfs);
   }
   return NULL;
......
@@ -562,12 +562,13 @@ void tsdbRefTable(STable *pTable) {
 }
 void tsdbUnRefTable(STable *pTable) {
-  int32_t ref = T_REF_DEC(pTable);
-  tsdbDebug("unref table %s uid:%"PRIu64" tid:%d, refCount:%d", TABLE_CHAR_NAME(pTable), TABLE_UID(pTable), TABLE_TID(pTable), ref);
-  if (ref == 0) {
-    // tsdbDebug("destory table name:%s uid:%"PRIu64", tid:%d", TABLE_CHAR_NAME(pTable), TABLE_UID(pTable), TABLE_TID(pTable));
+  uint64_t uid = TABLE_UID(pTable);
+  int32_t  tid = TABLE_TID(pTable);
+  int32_t  ref = T_REF_DEC(pTable);
+  tsdbDebug("unref table, uid:%" PRIu64 " tid:%d, refCount:%d", uid, tid, ref);
+  if (ref == 0) {
     if (TABLE_TYPE(pTable) == TSDB_CHILD_TABLE) {
       tsdbUnRefTable(pTable->pSuper);
     }
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/rpc/inc)
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 FIND_PATH(HEADER_GTEST_INCLUDE_DIR gtest.h /usr/include/gtest /usr/local/include/gtest)
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc)
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 INCLUDE_DIRECTORIES(inc)
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 IF (TD_LINUX)
......
@@ -3,7 +3,7 @@
 # generate release version:
 # mkdir release; cd release; cmake -DCMAKE_BUILD_TYPE=Release ..
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 SET(CMAKE_C_STANDARD 11)
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 IF (TD_LINUX)
......
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import random
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import tdDnodes
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
flagList=["debugflag", "cdebugflag", "tmrDebugFlag", "uDebugFlag", "rpcDebugFlag"]
for flag in flagList:
tdSql.execute("alter local %s 131" % flag)
tdSql.execute("alter local %s 135" % flag)
tdSql.execute("alter local %s 143" % flag)
randomFlag = random.randint(100, 250)
if randomFlag != 131 and randomFlag != 135 and randomFlag != 143:
tdSql.error("alter local %s %d" % (flag, randomFlag))
tdSql.query("show dnodes")
dnodeId = tdSql.getData(0, 0)
for flag in flagList:
tdSql.execute("alter dnode %d %s 131" % (dnodeId, flag))
tdSql.execute("alter dnode %d %s 135" % (dnodeId, flag))
tdSql.execute("alter dnode %d %s 143" % (dnodeId, flag))
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
@@ -126,7 +126,6 @@ class TDTestCase:
         for i in range(2, size):
             tdSql.checkData(0, i, self.rowNum * (size - i))
-        tdSql.error("alter local debugflag 143")
         tdSql.execute("create table st(ts timestamp, c1 int) tags(t1 float)")
         tdSql.execute("create table t0 using st tags(null)")
......
@@ -193,6 +193,7 @@ python3 ./test.py -f stream/table_n.py
 #alter table
 python3 ./test.py -f alter/alter_table_crash.py
 python3 ./test.py -f alter/alter_table.py
+python3 ./test.py -f alter/alter_debugFlag.py
 # client
 python3 ./test.py -f client/client.py
......
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/inc)
......