Commit 6f5bafcd authored by lichuang

Merge branch 'develop' into feature/TD-4034

[![Build Status](https://travis-ci.org/taosdata/TDengine.svg?branch=master)](https://travis-ci.org/taosdata/TDengine)
[![Build Status](https://cloud.drone.io/api/badges/taosdata/TDengine/status.svg?ref=refs/heads/master)](https://cloud.drone.io/taosdata/TDengine)
[![Build status](https://ci.appveyor.com/api/projects/status/kf3pwh2or5afsgl9/branch/master?svg=true)](https://ci.appveyor.com/project/sangshuduo/tdengine-2n8ge/branch/master)
[![Coverage Status](https://coveralls.io/repos/github/taosdata/TDengine/badge.svg?branch=develop)](https://coveralls.io/github/taosdata/TDengine?branch=develop)
[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/4201/badge)](https://bestpractices.coreinfrastructure.org/projects/4201)
......
......@@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS)
#INSTALL(TARGETS taos RUNTIME DESTINATION driver)
#INSTALL(TARGETS shell RUNTIME DESTINATION .)
IF (TD_MVN_INSTALLED)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.28-dist.jar DESTINATION connector/jdbc)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.29.jar DESTINATION connector/jdbc)
ENDIF ()
ELSEIF (TD_DARWIN)
SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")
......
......@@ -36,6 +36,15 @@ static char monotonic_info_string[32];
static long mono_ticksPerMicrosecond = 0;
#ifdef _TD_NINGSI_60
// implement __rdtsc in ningsi60
uint64_t __rdtsc(){
unsigned int lo,hi;
__asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
return ((uint64_t)hi << 32) | lo;
}
#endif
static monotime getMonotonicUs_x86() {
return __rdtsc() / mono_ticksPerMicrosecond;
}
......
name: tdengine
base: core18
version: '2.1.0.0'
icon: snap/gui/t-dengine.svg
summary: an open-source big data platform designed and optimized for IoT.
......
......@@ -48,6 +48,8 @@ void tscLockByThread(int64_t *lockedBy);
void tscUnlockByThread(int64_t *lockedBy);
int tsInsertInitialCheck(SSqlObj *pSql);
#ifdef __cplusplus
}
#endif
......
......@@ -174,7 +174,8 @@ void tscFieldInfoClear(SFieldInfo* pFieldInfo);
static FORCE_INLINE int32_t tscNumOfFields(SQueryInfo* pQueryInfo) { return pQueryInfo->fieldsInfo.numOfOutput; }
int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2);
int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2, int32_t *diffSize);
int32_t tscFieldInfoSetSize(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2);
void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes);
......@@ -306,7 +307,7 @@ STableMeta* createSuperTableMeta(STableMetaMsg* pChild);
uint32_t tscGetTableMetaSize(STableMeta* pTableMeta);
CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta);
uint32_t tscGetTableMetaMaxSize();
int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name);
int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name, void* buf);
STableMeta* tscTableMetaDup(STableMeta* pTableMeta);
int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAttr, void* addr);
......
......@@ -21,8 +21,8 @@ extern "C" {
#endif
#include "taosmsg.h"
#include "tstoken.h"
#include "tsclient.h"
#include "ttoken.h"
/**
* get the number of tags of this table
......
......@@ -84,6 +84,7 @@ typedef struct STableMeta {
typedef struct STableMetaInfo {
STableMeta *pTableMeta; // table meta, cached in client side and acquired by name
uint32_t tableMetaSize;
SVgroupsInfo *vgroupList;
SArray *pVgroupTables; // SArray<SVgroupTableInfo>
......@@ -154,13 +155,12 @@ typedef struct STagCond {
typedef struct SParamInfo {
int32_t idx;
char type;
uint8_t type;
uint8_t timePrec;
int16_t bytes;
uint32_t offset;
} SParamInfo;
typedef struct SBoundColumn {
bool hasVal; // denote if current column has bound or not
int32_t offset; // all column offset value
......@@ -372,7 +372,8 @@ typedef struct SSqlObj {
tsem_t rspSem;
SSqlCmd cmd;
SSqlRes res;
bool isBind;
SSubqueryState subState;
struct SSqlObj **pSubs;
......
......@@ -100,7 +100,7 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getResultSetImp
/*
* Class: com_taosdata_jdbc_TSDBJNIConnector
* Method: isUpdateQueryImp
* Signature: (J)J
* Signature: (JJ)I
*/
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_isUpdateQueryImp
(JNIEnv *env, jobject jobj, jlong con, jlong tres);
......@@ -185,6 +185,44 @@ JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_unsubscribeImp
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_validateCreateTableSqlImp
(JNIEnv *, jobject, jlong, jbyteArray);
/*
* Class: com_taosdata_jdbc_TSDBJNIConnector
* Method: prepareStmtImp
* Signature: ([BJ)J
*/
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_prepareStmtImp
(JNIEnv *, jobject, jbyteArray, jlong);
/*
* Class: com_taosdata_jdbc_TSDBJNIConnector
* Method: setBindTableNameImp
* Signature: (JLjava/lang/String;J)I
*/
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setBindTableNameImp
(JNIEnv *, jobject, jlong, jstring, jlong);
/*
* Class: com_taosdata_jdbc_TSDBJNIConnector
* Method: bindColDataImp
* Signature: (J[B[B[BIIIIJ)J
*/
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_bindColDataImp
(JNIEnv *, jobject, jlong, jbyteArray, jbyteArray, jbyteArray, jint, jint, jint, jint, jlong);
/*
* Class: com_taosdata_jdbc_TSDBJNIConnector
* Method: executeBatchImp
* Signature: (JJ)I
*/
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeBatchImp(JNIEnv *env, jobject jobj, jlong stmt, jlong con);
/*
* Class: com_taosdata_jdbc_TSDBJNIConnector
* Method: closeStmt
* Signature: (JJ)I
*/
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeStmt(JNIEnv *env, jobject jobj, jlong stmt, jlong con);
#ifdef __cplusplus
}
#endif
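For readers decoding the Signature comments above: they are standard JNI type descriptors, where J stands for long, I for int, [B for byte[], Ljava/lang/String; for String, and the descriptor after the closing parenthesis is the return type. The matching Java-side declarations, as they appear further down in this commit inside TSDBJNIConnector.java, are:

    // Java-side native declarations paired with the JNI entry points above
    private native long prepareStmtImp(byte[] sql, long con);
    private native int setBindTableNameImp(long stmt, String name, long conn);
    private native int bindColDataImp(long stmt, byte[] colDataList, byte[] lengthList, byte[] isNullList,
                                      int type, int bytes, int numOfRows, int columnIndex, long conn);
    private native int executeBatchImp(long stmt, long con);
    private native int closeStmt(long stmt, long con);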
......
......@@ -687,4 +687,194 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TDDBJNIConnector_getResultTimePrec
}
return taos_result_precision(result);
}
\ No newline at end of file
}
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_prepareStmtImp(JNIEnv *env, jobject jobj, jbyteArray jsql, jlong con) {
TAOS *tscon = (TAOS *)con;
if (tscon == NULL) {
jniError("jobj:%p, connection already closed", jobj);
return JNI_CONNECTION_NULL;
}
if (jsql == NULL) {
jniError("jobj:%p, conn:%p, empty sql string", jobj, tscon);
return JNI_SQL_NULL;
}
jsize len = (*env)->GetArrayLength(env, jsql);
char *str = (char *) calloc(1, sizeof(char) * (len + 1));
if (str == NULL) {
jniError("jobj:%p, conn:%p, alloc memory failed", jobj, tscon);
return JNI_OUT_OF_MEMORY;
}
(*env)->GetByteArrayRegion(env, jsql, 0, len, (jbyte *)str);
if ((*env)->ExceptionCheck(env)) {
// todo handle error
}
TAOS_STMT* pStmt = taos_stmt_init(tscon);
int32_t code = taos_stmt_prepare(pStmt, str, len);
if (code != TSDB_CODE_SUCCESS) {
jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
return JNI_TDENGINE_ERROR;
}
free(str);
return (jlong) pStmt;
}
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setBindTableNameImp(JNIEnv *env, jobject jobj, jlong stmt, jstring jname, jlong conn) {
TAOS *tsconn = (TAOS *)conn;
if (tsconn == NULL) {
jniError("jobj:%p, connection already closed", jobj);
return JNI_CONNECTION_NULL;
}
TAOS_STMT* pStmt = (TAOS_STMT*) stmt;
if (pStmt == NULL) {
jniError("jobj:%p, conn:%p, invalid stmt handle", jobj, tsconn);
return JNI_SQL_NULL;
}
const char *name = (*env)->GetStringUTFChars(env, jname, NULL);
int32_t code = taos_stmt_set_tbname((void*)stmt, name);
if (code != TSDB_CODE_SUCCESS) {
(*env)->ReleaseStringUTFChars(env, jname, name);
jniError("jobj:%p, conn:%p, code:%s", jobj, tsconn, tstrerror(code));
return JNI_TDENGINE_ERROR;
}
jniDebug("jobj:%p, conn:%p, set stmt bind table name:%s", jobj, tsconn, name);
(*env)->ReleaseStringUTFChars(env, jname, name);
return JNI_SUCCESS;
}
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_bindColDataImp(JNIEnv *env, jobject jobj, jlong stmt,
jbyteArray colDataList, jbyteArray lengthList, jbyteArray nullList, jint dataType, jint dataBytes, jint numOfRows, jint colIndex, jlong con) {
TAOS *tscon = (TAOS *)con;
if (tscon == NULL) {
jniError("jobj:%p, connection already closed", jobj);
return JNI_CONNECTION_NULL;
}
TAOS_STMT* pStmt = (TAOS_STMT*) stmt;
if (pStmt == NULL) {
jniError("jobj:%p, conn:%p, invalid stmt", jobj, tscon);
return JNI_SQL_NULL;
}
// todo refactor
jsize len = (*env)->GetArrayLength(env, colDataList);
char *colBuf = (char *)calloc(1, len);
(*env)->GetByteArrayRegion(env, colDataList, 0, len, (jbyte *)colBuf);
if ((*env)->ExceptionCheck(env)) {
// todo handle error
}
len = (*env)->GetArrayLength(env, lengthList);
char *lengthArray = (char*) calloc(1, len);
(*env)->GetByteArrayRegion(env, lengthList, 0, len, (jbyte*) lengthArray);
if ((*env)->ExceptionCheck(env)) {
}
len = (*env)->GetArrayLength(env, nullList);
char *nullArray = (char*) calloc(1, len);
(*env)->GetByteArrayRegion(env, nullList, 0, len, (jbyte*) nullArray);
if ((*env)->ExceptionCheck(env)) {
}
// bind multiple rows with a single invocation.
TAOS_MULTI_BIND* b = calloc(1, sizeof(TAOS_MULTI_BIND));
b->num = numOfRows;
b->buffer_type = dataType; // todo check data type
b->buffer_length = IS_VAR_DATA_TYPE(dataType)? dataBytes:tDataTypes[dataType].bytes;
b->is_null = nullArray;
b->buffer = colBuf;
b->length = (int32_t*)lengthArray;
// set the length and is_null array
switch(dataType) {
case TSDB_DATA_TYPE_INT:
case TSDB_DATA_TYPE_TINYINT:
case TSDB_DATA_TYPE_SMALLINT:
case TSDB_DATA_TYPE_TIMESTAMP:
case TSDB_DATA_TYPE_BIGINT: {
int32_t bytes = tDataTypes[dataType].bytes;
for(int32_t i = 0; i < numOfRows; ++i) {
b->length[i] = bytes;
}
break;
}
case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_BINARY: {
// do nothing
}
}
int32_t code = taos_stmt_bind_single_param_batch(pStmt, b, colIndex);
tfree(b->length);
tfree(b->buffer);
tfree(b->is_null);
tfree(b);
if (code != TSDB_CODE_SUCCESS) {
jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
return JNI_TDENGINE_ERROR;
}
return JNI_SUCCESS;
}
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeBatchImp(JNIEnv *env, jobject jobj, jlong stmt, jlong con) {
TAOS *tscon = (TAOS *)con;
if (tscon == NULL) {
jniError("jobj:%p, connection already closed", jobj);
return JNI_CONNECTION_NULL;
}
TAOS_STMT *pStmt = (TAOS_STMT*) stmt;
if (pStmt == NULL) {
jniError("jobj:%p, conn:%p, invalid stmt", jobj, tscon);
return JNI_SQL_NULL;
}
taos_stmt_add_batch(pStmt);
int32_t code = taos_stmt_execute(pStmt);
if (code != TSDB_CODE_SUCCESS) {
jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
return JNI_TDENGINE_ERROR;
}
jniDebug("jobj:%p, conn:%p, batch execute", jobj, tscon);
return JNI_SUCCESS;
}
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeStmt(JNIEnv *env, jobject jobj, jlong stmt, jlong con) {
TAOS *tscon = (TAOS *)con;
if (tscon == NULL) {
jniError("jobj:%p, connection already closed", jobj);
return JNI_CONNECTION_NULL;
}
TAOS_STMT *pStmt = (TAOS_STMT*) stmt;
if (pStmt == NULL) {
jniError("jobj:%p, conn:%p, invalid stmt", jobj, tscon);
return JNI_SQL_NULL;
}
int32_t code = taos_stmt_close(pStmt);
if (code != TSDB_CODE_SUCCESS) {
jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
return JNI_TDENGINE_ERROR;
}
jniDebug("jobj:%p, conn:%p, stmt closed", jobj, tscon);
return JNI_SUCCESS;
}
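Taken together, these entry points wrap the C prepared-statement API (taos_stmt_init, taos_stmt_prepare, taos_stmt_set_tbname, taos_stmt_bind_single_param_batch, taos_stmt_add_batch, taos_stmt_execute, taos_stmt_close). The sketch below shows the order in which the Java wrappers added later in this commit are expected to drive them. It is an illustration only: the SQL text, table name, and buffers are placeholders, and it assumes an already connected TSDBJNIConnector plus the usual java.nio/java.sql imports.

    // Sketch: one bind-and-execute round trip through the new connector-level wrappers.
    static void bindAndExecute(TSDBJNIConnector connector,
                               ByteBuffer tsData, ByteBuffer tsLens, ByteBuffer tsNulls,
                               ByteBuffer valData, ByteBuffer valLens, ByteBuffer valNulls,
                               int numOfRows) throws SQLException {
        long stmt = connector.prepareStmt("insert into ? values(?, ?)");  // taos_stmt_init + taos_stmt_prepare
        connector.setBindTableName(stmt, "d1001");                        // taos_stmt_set_tbname, placeholder name
        connector.bindColumnDataArray(stmt, tsData, tsLens, tsNulls,
                TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP, Long.BYTES, numOfRows, 0);
        connector.bindColumnDataArray(stmt, valData, valLens, valNulls,
                TSDBConstants.TSDB_DATA_TYPE_INT, Integer.BYTES, numOfRows, 1);
        connector.executeBatch(stmt);                                     // taos_stmt_add_batch + taos_stmt_execute
        connector.closeBatch(stmt);                                       // taos_stmt_close
    }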
......@@ -326,6 +326,7 @@ TAOS_ROW tscFetchRow(void *param) {
pCmd->command == TSDB_SQL_FETCH ||
pCmd->command == TSDB_SQL_SHOW ||
pCmd->command == TSDB_SQL_SHOW_CREATE_TABLE ||
pCmd->command == TSDB_SQL_SHOW_CREATE_STABLE ||
pCmd->command == TSDB_SQL_SHOW_CREATE_DATABASE ||
pCmd->command == TSDB_SQL_SELECT ||
pCmd->command == TSDB_SQL_DESCRIBE_TABLE ||
......@@ -679,6 +680,9 @@ static int32_t tscProcessShowCreateTable(SSqlObj *pSql) {
assert(pTableMetaInfo->pTableMeta != NULL);
const char* tableName = tNameGetTableName(&pTableMetaInfo->name);
if (pSql->cmd.command == TSDB_SQL_SHOW_CREATE_STABLE && !UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
return TSDB_CODE_TSC_INVALID_VALUE;
}
char *result = (char *)calloc(1, TSDB_MAX_BINARY_LEN);
int32_t code = TSDB_CODE_SUCCESS;
......@@ -907,7 +911,7 @@ int tscProcessLocalCmd(SSqlObj *pSql) {
*/
pRes->qId = 0x1;
pRes->numOfRows = 0;
} else if (pCmd->command == TSDB_SQL_SHOW_CREATE_TABLE) {
} else if (pCmd->command == TSDB_SQL_SHOW_CREATE_TABLE || pCmd->command == TSDB_SQL_SHOW_CREATE_STABLE) {
pRes->code = tscProcessShowCreateTable(pSql);
} else if (pCmd->command == TSDB_SQL_SHOW_CREATE_DATABASE) {
pRes->code = tscProcessShowCreateDatabase(pSql);
......
......@@ -29,8 +29,7 @@
#include "taosdef.h"
#include "tscLog.h"
#include "tscSubquery.h"
#include "tstoken.h"
#include "ttoken.h"
#include "tdataformat.h"
......@@ -386,7 +385,7 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
* The server time/client time should not be mixed up in one sql string
* Do not employ the sort operation if server time is used.
*/
static int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start) {
int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start) {
// once the data block is disordered, we do NOT keep previous timestamp any more
if (!pDataBlocks->ordered) {
return TSDB_CODE_SUCCESS;
......@@ -411,6 +410,7 @@ static int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start
if (k <= pDataBlocks->prevTS && (pDataBlocks->tsSource == TSDB_USE_CLI_TS)) {
pDataBlocks->ordered = false;
tscWarn("NOT ordered input timestamp");
}
pDataBlocks->prevTS = k;
......@@ -463,23 +463,24 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, SSqlCmd *pCmd, int1
// Remove quotation marks
if (TK_STRING == sToken.type) {
// delete escape character: \\, \', \"
char delim = sToken.z[0];
char delim = sToken.z[0];
int32_t cnt = 0;
int32_t j = 0;
for (uint32_t k = 1; k < sToken.n - 1; ++k) {
if (sToken.z[k] == delim || sToken.z[k] == '\\') {
if (sToken.z[k + 1] == delim) {
cnt++;
if (sToken.z[k] == '\\' || (sToken.z[k] == delim && sToken.z[k + 1] == delim)) {
tmpTokenBuf[j] = sToken.z[k + 1];
j++;
k++;
continue;
}
cnt++;
j++;
k++;
continue;
}
tmpTokenBuf[j] = sToken.z[k];
j++;
}
tmpTokenBuf[j] = 0;
sToken.z = tmpTokenBuf;
sToken.n -= 2 + cnt;
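To make the escape handling above concrete: a backslash always keeps the character that follows it, a doubled quote character collapses into a single one, and cnt counts the dropped characters so the token length can shrink by 2 + cnt. A minimal Java sketch of the same rule, for illustration only (it is not part of the patch):

    // Reproduces the quote/escape stripping rule above; input keeps its surrounding quotes.
    public class UnescapeDemo {
        static String unescape(String quoted) {
            char delim = quoted.charAt(0);
            StringBuilder out = new StringBuilder();
            for (int k = 1; k < quoted.length() - 1; k++) {
                char c = quoted.charAt(k);
                if (c == '\\' || (c == delim && quoted.charAt(k + 1) == delim)) {
                    out.append(quoted.charAt(k + 1)); // keep the escaped character
                    k++;                              // skip the backslash / second quote
                    continue;
                }
                out.append(c);
            }
            return out.toString();
        }

        public static void main(String[] args) {
            System.out.println(unescape("'It''s \\x'")); // prints: It's x
        }
    }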
......@@ -693,6 +694,8 @@ void tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf) {
pBlocks->numOfRows = i + 1;
dataBuf->size = sizeof(SSubmitBlk) + dataBuf->rowSize * pBlocks->numOfRows;
}
dataBuf->prevTS = INT64_MIN;
}
static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, STableDataBlocks* dataBuf, int32_t *totalNum) {
......@@ -705,19 +708,11 @@ static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, STableDataBlock
}
code = TSDB_CODE_TSC_INVALID_SQL;
char *tmpTokenBuf = calloc(1, 16*1024); // used for deleting Escape character: \\, \', \"
if (NULL == tmpTokenBuf) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
char tmpTokenBuf[16*1024] = {0}; // used for deleting Escape character: \\, \', \"
int32_t numOfRows = 0;
code = tsParseValues(str, dataBuf, maxNumOfRows, pCmd, &numOfRows, tmpTokenBuf);
free(tmpTokenBuf);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
for (uint32_t i = 0; i < dataBuf->numOfParams; ++i) {
SParamInfo *param = dataBuf->params + i;
if (param->idx == -1) {
......@@ -934,6 +929,42 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
return tscSQLSyntaxErrMsg(pCmd->payload, ") expected", sToken.z);
}
/* parse columns after super table tag values.
* insert into table_name using super_table(tag_name1, tag_name2) tags(tag_val1, tag_val2)
* (normal_col1, normal_col2) values(normal_col1_val, normal_col2_val);
* */
index = 0;
sToken = tStrGetToken(sql, &index, false);
sql += index;
int numOfColsAfterTags = 0;
if (sToken.type == TK_LP) {
if (*boundColumn != NULL) {
return tscSQLSyntaxErrMsg(pCmd->payload, "bind columns again", sToken.z);
} else {
*boundColumn = &sToken.z[0];
}
while (1) {
index = 0;
sToken = tStrGetToken(sql, &index, false);
if (sToken.type == TK_RP) {
break;
}
sql += index;
++numOfColsAfterTags;
}
if (numOfColsAfterTags == 0 && (*boundColumn) != NULL) {
return TSDB_CODE_TSC_INVALID_SQL;
}
sToken = tStrGetToken(sql, &index, false);
}
sql = sToken.z;
if (tscValidateName(&tableToken) != TSDB_CODE_SUCCESS) {
return tscInvalidSQLErrMsg(pCmd->payload, "invalid table name", *sqlstr);
}
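The branch added above accepts a bound column list between the tags(...) clause and values(...). A hypothetical end-to-end example of that statement shape over plain JDBC follows; the URL, credentials, and the meters/d1001 names are placeholders, not taken from this commit.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    // Hypothetical: auto-create a child table from a super table and bind only some of its columns.
    public class InsertUsingTagsDemo {
        public static void main(String[] args) throws Exception {
            String url = "jdbc:TAOS://localhost:6030/test";   // placeholder host/db
            try (Connection conn = DriverManager.getConnection(url, "root", "taosdata");
                 Statement stmt = conn.createStatement()) {
                stmt.executeUpdate(
                    "insert into d1001 using meters(location, groupid) tags('beijing', 2) " +
                    "(ts, current) values (now, 10.3)");
            }
        }
    }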
......@@ -975,7 +1006,7 @@ int validateTableName(char *tblName, int len, SStrToken* psTblToken) {
psTblToken->n = len;
psTblToken->type = TK_ID;
tSQLGetToken(psTblToken->z, &psTblToken->type);
tGetToken(psTblToken->z, &psTblToken->type);
return tscValidateName(psTblToken);
}
......@@ -1262,7 +1293,7 @@ int tsParseInsertSql(SSqlObj *pSql) {
goto _clean;
}
if (taosHashGetSize(pCmd->pTableBlockHashList) > 0) { // merge according to vgId
if ((pCmd->insertType != TSDB_QUERY_TYPE_STMT_INSERT) && taosHashGetSize(pCmd->pTableBlockHashList) > 0) { // merge according to vgId
if ((code = tscMergeTableDataBlocks(pSql, true)) != TSDB_CODE_SUCCESS) {
goto _clean;
}
......
This diff has been collapsed.
......@@ -21,19 +21,19 @@
#endif // __APPLE__
#include "os.h"
#include "ttype.h"
#include "texpr.h"
#include "taos.h"
#include "taosmsg.h"
#include "tcompare.h"
#include "texpr.h"
#include "tname.h"
#include "tscLog.h"
#include "tscUtil.h"
#include "tschemautil.h"
#include "tsclient.h"
#include "tstoken.h"
#include "tstrbuild.h"
#include "ttoken.h"
#include "ttokendef.h"
#include "ttype.h"
#include "qUtil.h"
#define DEFAULT_PRIMARY_TIMESTAMP_COL_NAME "_c0"
......@@ -432,7 +432,6 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
// additional msg has been attached already
code = tscSetTableFullName(pTableMetaInfo, pToken, pSql);
if (code != TSDB_CODE_SUCCESS) {
......@@ -441,6 +440,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
return tscGetTableMeta(pSql, pTableMetaInfo);
}
case TSDB_SQL_SHOW_CREATE_STABLE:
case TSDB_SQL_SHOW_CREATE_TABLE: {
const char* msg1 = "invalid table name";
......@@ -635,18 +635,26 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
// set the command/global limit parameters from the first subclause to the sqlcmd object
SQueryInfo* pQueryInfo1 = tscGetQueryInfo(pCmd, 0);
pCmd->command = pQueryInfo1->command;
int32_t diffSize = 0;
// if there is only one element, the limit of the clause is the limit of the global result.
// validate the select node for "UNION ALL" subclause
for (int32_t i = 1; i < pCmd->numOfClause; ++i) {
SQueryInfo* pQueryInfo2 = tscGetQueryInfo(pCmd, i);
int32_t ret = tscFieldInfoCompare(&pQueryInfo1->fieldsInfo, &pQueryInfo2->fieldsInfo);
int32_t ret = tscFieldInfoCompare(&pQueryInfo1->fieldsInfo, &pQueryInfo2->fieldsInfo, &diffSize);
if (ret != 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
}
if (diffSize) {
for (int32_t i = 1; i < pCmd->numOfClause; ++i) {
SQueryInfo* pQueryInfo2 = tscGetQueryInfo(pCmd, i);
tscFieldInfoSetSize(&pQueryInfo1->fieldsInfo, &pQueryInfo2->fieldsInfo);
}
}
pCmd->parseFinished = 1;
return TSDB_CODE_SUCCESS; // do not build query message here
}
......@@ -975,11 +983,10 @@ int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pTableNam
const char* msg3 = "no acctId";
const char* msg4 = "db name too long";
const char* msg5 = "table name too long";
SSqlCmd* pCmd = &pSql->cmd;
int32_t code = TSDB_CODE_SUCCESS;
int32_t idx = getDelimiterIndex(pTableName);
int32_t idx = getDelimiterIndex(pTableName);
if (idx != -1) { // db has been specified in sql string so we ignore current db path
char* acctId = getAccountId(pSql);
if (acctId == NULL || strlen(acctId) <= 0) {
......@@ -993,9 +1000,9 @@ int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pTableNam
if (idx >= TSDB_DB_NAME_LEN) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
if (pTableName->n - 1 - idx >= TSDB_TABLE_NAME_LEN) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
char name[TSDB_TABLE_FNAME_LEN] = {0};
......@@ -1342,8 +1349,8 @@ static char* cloneCurrentDBName(SSqlObj* pSql) {
/* length limitation, strstr cannot be applied */
static int32_t getDelimiterIndex(SStrToken* pTableName) {
for (uint32_t i = 0; i < pTableName->n; ++i) {
if (pTableName->z[i] == TS_PATH_DELIMITER[0]) {
for (uint32_t i = 0; i < pTableName->n; ++i) {
if (pTableName->z[i] == TS_PATH_DELIMITER[0]) {
return i;
}
}
......@@ -1606,11 +1613,27 @@ bool isValidDistinctSql(SQueryInfo* pQueryInfo) {
return false;
}
static bool hasNoneUserDefineExpr(SQueryInfo* pQueryInfo) {
size_t numOfExprs = taosArrayGetSize(pQueryInfo->exprList);
for (int32_t i = 0; i < numOfExprs; ++i) {
SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, i);
if (TSDB_COL_IS_UD_COL(pExpr->colInfo.flag)) {
continue;
}
return true;
}
return false;
}
int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pSelNodeList, bool isSTable, bool joinQuery,
bool timeWindowQuery) {
assert(pSelNodeList != NULL && pCmd != NULL);
const char* msg1 = "too many items in selection clause";
const char* msg2 = "functions or others can not be mixed up";
const char* msg3 = "not support query expression";
const char* msg4 = "only support distinct one tag";
......@@ -1675,7 +1698,7 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
// there is only one user-defined column in the final result field, add the timestamp column.
size_t numOfSrcCols = taosArrayGetSize(pQueryInfo->colList);
if (numOfSrcCols <= 0 && !tscQueryTags(pQueryInfo) && !tscQueryBlockInfo(pQueryInfo)) {
if ((numOfSrcCols <= 0 || !hasNoneUserDefineExpr(pQueryInfo)) && !tscQueryTags(pQueryInfo) && !tscQueryBlockInfo(pQueryInfo)) {
addPrimaryTsColIntoResult(pQueryInfo);
}
......@@ -4621,7 +4644,7 @@ int32_t getTimeRange(STimeWindow* win, tSqlExpr* pRight, int32_t optr, int16_t t
}
} else {
SStrToken token = {.z = pRight->value.pz, .n = pRight->value.nLen, .type = TK_ID};
int32_t len = tSQLGetToken(pRight->value.pz, &token.type);
int32_t len = tGetToken(pRight->value.pz, &token.type);
if ((token.type != TK_INTEGER && token.type != TK_FLOAT) || len != pRight->value.nLen) {
return TSDB_CODE_TSC_INVALID_SQL;
......@@ -5508,13 +5531,13 @@ int32_t validateLocalConfig(SMiscInfo* pOptions) {
}
int32_t validateColumnName(char* name) {
bool ret = isKeyWord(name, (int32_t)strlen(name));
bool ret = taosIsKeyWordToken(name, (int32_t)strlen(name));
if (ret) {
return TSDB_CODE_TSC_INVALID_SQL;
}
SStrToken token = {.z = name};
token.n = tSQLGetToken(name, &token.type);
token.n = tGetToken(name, &token.type);
if (token.type != TK_STRING && token.type != TK_ID) {
return TSDB_CODE_TSC_INVALID_SQL;
......@@ -5525,7 +5548,7 @@ int32_t validateColumnName(char* name) {
strntolower(token.z, token.z, token.n);
token.n = (uint32_t)strtrim(token.z);
int32_t k = tSQLGetToken(token.z, &token.type);
int32_t k = tGetToken(token.z, &token.type);
if (k != token.n) {
return TSDB_CODE_TSC_INVALID_SQL;
}
......@@ -7502,4 +7525,3 @@ bool hasNormalColumnFilter(SQueryInfo* pQueryInfo) {
return false;
}
......@@ -1881,6 +1881,8 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
return TSDB_CODE_TSC_INVALID_VALUE;
}
assert(pTableMeta->tableType == TSDB_SUPER_TABLE || pTableMeta->tableType == TSDB_CHILD_TABLE || pTableMeta->tableType == TSDB_NORMAL_TABLE || pTableMeta->tableType == TSDB_STREAM_TABLE);
if (pTableMeta->tableType == TSDB_CHILD_TABLE) {
// check if the super table is in the hashmap or not
int32_t len = (int32_t) strnlen(pTableMeta->sTableName, TSDB_TABLE_FNAME_LEN);
......@@ -2441,10 +2443,23 @@ static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaIn
int32_t tscGetTableMeta(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) {
assert(tIsValidName(&pTableMetaInfo->name));
tfree(pTableMetaInfo->pTableMeta);
uint32_t size = tscGetTableMetaMaxSize();
pTableMetaInfo->pTableMeta = calloc(1, size);
if (pTableMetaInfo->pTableMeta == NULL) {
pTableMetaInfo->pTableMeta = calloc(1, size);
pTableMetaInfo->tableMetaSize = size;
} else if (pTableMetaInfo->tableMetaSize < size) {
char *tmp = realloc(pTableMetaInfo->pTableMeta, size);
if (tmp == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
pTableMetaInfo->pTableMeta = (STableMeta *)tmp;
memset(pTableMetaInfo->pTableMeta, 0, size);
pTableMetaInfo->tableMetaSize = size;
} else {
//uint32_t s = tscGetTableMetaSize(pTableMetaInfo->pTableMeta);
memset(pTableMetaInfo->pTableMeta, 0, size);
pTableMetaInfo->tableMetaSize = size;
}
pTableMetaInfo->pTableMeta->tableType = -1;
pTableMetaInfo->pTableMeta->tableInfo.numOfColumns = -1;
......@@ -2456,10 +2471,13 @@ int32_t tscGetTableMeta(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) {
taosHashGetClone(tscTableMetaInfo, name, len, NULL, pTableMetaInfo->pTableMeta, -1);
// TODO resize the tableMeta
char buf[80*1024] = {0};
assert(size < 80*1024);
STableMeta* pMeta = pTableMetaInfo->pTableMeta;
if (pMeta->id.uid > 0) {
if (pMeta->tableType == TSDB_CHILD_TABLE) {
int32_t code = tscCreateTableMetaFromCChildMeta(pTableMetaInfo->pTableMeta, name);
int32_t code = tscCreateTableMetaFromCChildMeta(pTableMetaInfo->pTableMeta, name, buf);
if (code != TSDB_CODE_SUCCESS) {
return getTableMetaFromMnode(pSql, pTableMetaInfo);
}
......@@ -2641,6 +2659,7 @@ void tscInitMsgsFp() {
tscProcessMsgRsp[TSDB_SQL_ALTER_DB] = tscProcessAlterDbMsgRsp;
tscProcessMsgRsp[TSDB_SQL_SHOW_CREATE_TABLE] = tscProcessShowCreateRsp;
tscProcessMsgRsp[TSDB_SQL_SHOW_CREATE_STABLE] = tscProcessShowCreateRsp;
tscProcessMsgRsp[TSDB_SQL_SHOW_CREATE_DATABASE] = tscProcessShowCreateRsp;
tscKeepConn[TSDB_SQL_SHOW] = 1;
......
......@@ -457,6 +457,7 @@ static bool needToFetchNewBlock(SSqlObj* pSql) {
pCmd->command == TSDB_SQL_FETCH ||
pCmd->command == TSDB_SQL_SHOW ||
pCmd->command == TSDB_SQL_SHOW_CREATE_TABLE ||
pCmd->command == TSDB_SQL_SHOW_CREATE_STABLE ||
pCmd->command == TSDB_SQL_SHOW_CREATE_DATABASE ||
pCmd->command == TSDB_SQL_SELECT ||
pCmd->command == TSDB_SQL_DESCRIBE_TABLE ||
......@@ -962,7 +963,7 @@ static int tscParseTblNameList(SSqlObj *pSql, const char *tblNameList, int32_t t
len = (int32_t)strtrim(tblName);
SStrToken sToken = {.n = len, .type = TK_ID, .z = tblName};
tSQLGetToken(tblName, &sToken.type);
tGetToken(tblName, &sToken.type);
// Check if the table name available or not
if (tscValidateName(&sToken) != TSDB_CODE_SUCCESS) {
......
......@@ -637,7 +637,13 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
int16_t colId = tscGetJoinTagColIdByUid(&pQueryInfo->tagCond, pTableMetaInfo->pTableMeta->id.uid);
// set the tag column id for executor to extract correct tag value
#ifndef _TD_NINGSI_60
pExpr->base.param[0] = (tVariant) {.i64 = colId, .nType = TSDB_DATA_TYPE_BIGINT, .nLen = sizeof(int64_t)};
#else
pExpr->base.param[0].i64 = colId;
pExpr->base.param[0].nType = TSDB_DATA_TYPE_BIGINT;
pExpr->base.param[0].nLen = sizeof(int64_t);
#endif
pExpr->base.numOfParams = 1;
}
......
......@@ -1038,7 +1038,8 @@ int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock) {
tfree(pTableMetaInfo->pTableMeta);
}
pTableMetaInfo->pTableMeta = tscTableMetaDup(pDataBlock->pTableMeta);
pTableMetaInfo->pTableMeta = tscTableMetaDup(pDataBlock->pTableMeta);
pTableMetaInfo->tableMetaSize = tscGetTableMetaSize(pDataBlock->pTableMeta);
}
/*
......@@ -1255,67 +1256,73 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap) {
STableDataBlocks* pOneTableBlock = *p;
while(pOneTableBlock) {
// the maximum expanded size in bytes when row-wise data is converted to SDataRow format
int32_t expandSize = getRowExpandSize(pOneTableBlock->pTableMeta);
STableDataBlocks* dataBuf = NULL;
int32_t ret = tscGetDataBlockFromList(pVnodeDataBlockHashList, pOneTableBlock->vgId, TSDB_PAYLOAD_SIZE,
INSERT_HEAD_SIZE, 0, &pOneTableBlock->tableName, pOneTableBlock->pTableMeta, &dataBuf, pVnodeDataBlockList);
if (ret != TSDB_CODE_SUCCESS) {
tscError("0x%"PRIx64" failed to prepare the data block buffer for merging table data, code:%d", pSql->self, ret);
taosHashCleanup(pVnodeDataBlockHashList);
tscDestroyBlockArrayList(pVnodeDataBlockList);
return ret;
}
SSubmitBlk* pBlocks = (SSubmitBlk*) pOneTableBlock->pData;
int64_t destSize = dataBuf->size + pOneTableBlock->size + pBlocks->numOfRows * expandSize + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
if (dataBuf->nAllocSize < destSize) {
while (dataBuf->nAllocSize < destSize) {
dataBuf->nAllocSize = (uint32_t)(dataBuf->nAllocSize * 1.5);
if (pBlocks->numOfRows > 0) {
// the maximum expanded size in bytes when row-wise data is converted to SDataRow format
int32_t expandSize = getRowExpandSize(pOneTableBlock->pTableMeta);
STableDataBlocks* dataBuf = NULL;
int32_t ret = tscGetDataBlockFromList(pVnodeDataBlockHashList, pOneTableBlock->vgId, TSDB_PAYLOAD_SIZE,
INSERT_HEAD_SIZE, 0, &pOneTableBlock->tableName, pOneTableBlock->pTableMeta, &dataBuf, pVnodeDataBlockList);
if (ret != TSDB_CODE_SUCCESS) {
tscError("0x%"PRIx64" failed to prepare the data block buffer for merging table data, code:%d", pSql->self, ret);
taosHashCleanup(pVnodeDataBlockHashList);
tscDestroyBlockArrayList(pVnodeDataBlockList);
return ret;
}
char* tmp = realloc(dataBuf->pData, dataBuf->nAllocSize);
if (tmp != NULL) {
dataBuf->pData = tmp;
memset(dataBuf->pData + dataBuf->size, 0, dataBuf->nAllocSize - dataBuf->size);
} else { // failed to allocate memory, free already allocated memory and return error code
tscError("0x%"PRIx64" failed to allocate memory for merging submit block, size:%d", pSql->self, dataBuf->nAllocSize);
int64_t destSize = dataBuf->size + pOneTableBlock->size + pBlocks->numOfRows * expandSize + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
taosHashCleanup(pVnodeDataBlockHashList);
tscDestroyBlockArrayList(pVnodeDataBlockList);
tfree(dataBuf->pData);
if (dataBuf->nAllocSize < destSize) {
while (dataBuf->nAllocSize < destSize) {
dataBuf->nAllocSize = (uint32_t)(dataBuf->nAllocSize * 1.5);
}
char* tmp = realloc(dataBuf->pData, dataBuf->nAllocSize);
if (tmp != NULL) {
dataBuf->pData = tmp;
memset(dataBuf->pData + dataBuf->size, 0, dataBuf->nAllocSize - dataBuf->size);
} else { // failed to allocate memory, free already allocated memory and return error code
tscError("0x%"PRIx64" failed to allocate memory for merging submit block, size:%d", pSql->self, dataBuf->nAllocSize);
taosHashCleanup(pVnodeDataBlockHashList);
tscDestroyBlockArrayList(pVnodeDataBlockList);
tfree(dataBuf->pData);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
}
}
tscSortRemoveDataBlockDupRows(pOneTableBlock);
char* ekey = (char*)pBlocks->data + pOneTableBlock->rowSize*(pBlocks->numOfRows-1);
tscSortRemoveDataBlockDupRows(pOneTableBlock);
char* ekey = (char*)pBlocks->data + pOneTableBlock->rowSize*(pBlocks->numOfRows-1);
tscDebug("0x%"PRIx64" name:%s, name:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pSql->self, tNameGetTableName(&pOneTableBlock->tableName),
pBlocks->tid, pBlocks->numOfRows, pBlocks->sversion, GET_INT64_VAL(pBlocks->data), GET_INT64_VAL(ekey));
tscDebug("0x%"PRIx64" name:%s, name:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pSql->self, tNameGetTableName(&pOneTableBlock->tableName),
pBlocks->tid, pBlocks->numOfRows, pBlocks->sversion, GET_INT64_VAL(pBlocks->data), GET_INT64_VAL(ekey));
int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize) + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize) + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
pBlocks->tid = htonl(pBlocks->tid);
pBlocks->uid = htobe64(pBlocks->uid);
pBlocks->sversion = htonl(pBlocks->sversion);
pBlocks->numOfRows = htons(pBlocks->numOfRows);
pBlocks->schemaLen = 0;
pBlocks->tid = htonl(pBlocks->tid);
pBlocks->uid = htobe64(pBlocks->uid);
pBlocks->sversion = htonl(pBlocks->sversion);
pBlocks->numOfRows = htons(pBlocks->numOfRows);
pBlocks->schemaLen = 0;
// erase the empty space reserved for binary data
int32_t finalLen = trimDataBlock(dataBuf->pData + dataBuf->size, pOneTableBlock, pCmd->submitSchema);
assert(finalLen <= len);
// erase the empty space reserved for binary data
int32_t finalLen = trimDataBlock(dataBuf->pData + dataBuf->size, pOneTableBlock, pCmd->submitSchema);
assert(finalLen <= len);
dataBuf->size += (finalLen + sizeof(SSubmitBlk));
assert(dataBuf->size <= dataBuf->nAllocSize);
dataBuf->size += (finalLen + sizeof(SSubmitBlk));
assert(dataBuf->size <= dataBuf->nAllocSize);
// the length does not include the SSubmitBlk structure
pBlocks->dataLen = htonl(finalLen);
dataBuf->numOfTables += 1;
// the length does not include the SSubmitBlk structure
pBlocks->dataLen = htonl(finalLen);
dataBuf->numOfTables += 1;
pBlocks->numOfRows = 0;
}else {
tscDebug("0x%"PRIx64" table %s data block is empty", pSql->self, pOneTableBlock->tableName.tname);
}
p = taosHashIterate(pCmd->pTableBlockHashList, p);
if (p == NULL) {
break;
......@@ -1437,7 +1444,7 @@ int16_t tscFieldInfoGetOffset(SQueryInfo* pQueryInfo, int32_t index) {
return pInfo->pExpr->base.offset;
}
int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2) {
int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2, int32_t *diffSize) {
assert(pFieldInfo1 != NULL && pFieldInfo2 != NULL);
if (pFieldInfo1->numOfOutput != pFieldInfo2->numOfOutput) {
......@@ -1449,15 +1456,36 @@ int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFi
TAOS_FIELD* pField2 = tscFieldInfoGetField((SFieldInfo*) pFieldInfo2, i);
if (pField1->type != pField2->type ||
pField1->bytes != pField2->bytes ||
strcasecmp(pField1->name, pField2->name) != 0) {
return 1;
}
if (pField1->bytes != pField2->bytes) {
*diffSize = 1;
if (pField2->bytes > pField1->bytes) {
pField1->bytes = pField2->bytes;
}
}
}
return 0;
}
int32_t tscFieldInfoSetSize(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2) {
assert(pFieldInfo1 != NULL && pFieldInfo2 != NULL);
for (int32_t i = 0; i < pFieldInfo1->numOfOutput; ++i) {
TAOS_FIELD* pField1 = tscFieldInfoGetField((SFieldInfo*) pFieldInfo1, i);
TAOS_FIELD* pField2 = tscFieldInfoGetField((SFieldInfo*) pFieldInfo2, i);
pField2->bytes = pField1->bytes;
}
return 0;
}
int32_t tscGetResRowLength(SArray* pExprList) {
size_t num = taosArrayGetSize(pExprList);
if (num == 0) {
......@@ -1867,7 +1895,7 @@ void tscColumnListDestroy(SArray* pColumnList) {
static int32_t validateQuoteToken(SStrToken* pToken) {
tscDequoteAndTrimToken(pToken);
int32_t k = tSQLGetToken(pToken->z, &pToken->type);
int32_t k = tGetToken(pToken->z, &pToken->type);
if (pToken->type == TK_STRING) {
return tscValidateName(pToken);
......@@ -1935,7 +1963,7 @@ int32_t tscValidateName(SStrToken* pToken) {
tscStrToLower(pToken->z, pToken->n);
//pToken->n = (uint32_t)strtrim(pToken->z);
int len = tSQLGetToken(pToken->z, &pToken->type);
int len = tGetToken(pToken->z, &pToken->type);
// single token, validate it
if (len == pToken->n) {
......@@ -1961,7 +1989,7 @@ int32_t tscValidateName(SStrToken* pToken) {
pToken->n = (uint32_t)strtrim(pToken->z);
}
pToken->n = tSQLGetToken(pToken->z, &pToken->type);
pToken->n = tGetToken(pToken->z, &pToken->type);
if (pToken->z[pToken->n] != TS_PATH_DELIMITER[0]) {
return TSDB_CODE_TSC_INVALID_SQL;
}
......@@ -1978,7 +2006,7 @@ int32_t tscValidateName(SStrToken* pToken) {
pToken->z = sep + 1;
pToken->n = (uint32_t)(oldLen - (sep - pStr) - 1);
int32_t len = tSQLGetToken(pToken->z, &pToken->type);
int32_t len = tGetToken(pToken->z, &pToken->type);
if (len != pToken->n || (pToken->type != TK_STRING && pToken->type != TK_ID)) {
return TSDB_CODE_TSC_INVALID_SQL;
}
......@@ -2431,6 +2459,11 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, SName* name, STableM
}
pTableMetaInfo->pTableMeta = pTableMeta;
if (pTableMetaInfo->pTableMeta == NULL) {
pTableMetaInfo->tableMetaSize = 0;
} else {
pTableMetaInfo->tableMetaSize = tscGetTableMetaSize(pTableMeta);
}
if (vgroupList != NULL) {
pTableMetaInfo->vgroupList = tscVgroupInfoClone(vgroupList);
......@@ -2706,6 +2739,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, &pTableMetaInfo->name, pTableMeta, pTableMetaInfo->vgroupList,
pTableMetaInfo->tagColList, pTableMetaInfo->pVgroupTables);
} else { // transfer the ownership of pTableMeta to the newly create sql object.
STableMetaInfo* pPrevInfo = tscGetTableMetaInfoFromCmd(&pPrevSql->cmd, pPrevSql->cmd.clauseIndex, 0);
if (pPrevInfo->pTableMeta && pPrevInfo->pTableMeta->tableType < 0) {
......@@ -3100,7 +3134,13 @@ void tscTryQueryNextClause(SSqlObj* pSql, __async_cb_func_t fp) {
//backup the total number of result first
int64_t num = pRes->numOfTotal + pRes->numOfClauseTotal;
// DON'T free final since it may be recorded and used later in APP
TAOS_FIELD* finalBk = pRes->final;
pRes->final = NULL;
tscFreeSqlResult(pSql);
pRes->final = finalBk;
pRes->numOfTotal = num;
......@@ -3333,11 +3373,11 @@ CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta) {
return cMeta;
}
int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name) {
assert(pChild != NULL);
int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name, void* buf) {
assert(pChild != NULL && buf != NULL);
uint32_t size = tscGetTableMetaMaxSize();
STableMeta* p = calloc(1, size);
// uint32_t size = tscGetTableMetaMaxSize();
STableMeta* p = buf;//calloc(1, size);
taosHashGetClone(tscTableMetaInfo, pChild->sTableName, strnlen(pChild->sTableName, TSDB_TABLE_FNAME_LEN), NULL, p, -1);
if (p->id.uid > 0) { // tableMeta exists, build child table meta and return
......@@ -3349,12 +3389,12 @@ int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name) {
memcpy(pChild->schema, p->schema, sizeof(SSchema) *total);
tfree(p);
// tfree(p);
return TSDB_CODE_SUCCESS;
} else { // super table has been removed, current tableMeta is also expired. remove it here
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
tfree(p);
// tfree(p);
return -1;
}
}
......
......@@ -4,7 +4,7 @@
#include <iostream>
#include "taos.h"
#include "tstoken.h"
#include "ttoken.h"
#include "tutil.h"
int main(int argc, char** argv) {
......
......@@ -80,6 +80,7 @@ enum {
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_TABLE_JOIN_RETRIEVE, "join-retrieve" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SHOW_CREATE_TABLE, "show-create-table")
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SHOW_CREATE_STABLE, "show-create-stable")
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SHOW_CREATE_DATABASE, "show-create-database")
/*
......
......@@ -89,9 +89,6 @@ tExprNode* exprdup(tExprNode* pTree);
bool exprTreeApplyFilter(tExprNode *pExpr, const void *pItem, SExprTraverseSupp *param);
typedef void (*_arithmetic_operator_fn_t)(void *left, int32_t numLeft, int32_t leftType, void *right, int32_t numRight,
int32_t rightType, void *output, int32_t order);
void arithmeticTreeTraverse(tExprNode *pExprs, int32_t numOfRows, char *pOutput, void *param, int32_t order,
char *(*cb)(void *, const char*, int32_t));
......
......@@ -18,7 +18,7 @@
#include "os.h"
#include "taosmsg.h"
#include "tstoken.h"
#include "ttoken.h"
#include "tvariant.h"
typedef struct SDataStatis {
......
......@@ -16,8 +16,8 @@
#ifndef TDENGINE_TVARIANT_H
#define TDENGINE_TVARIANT_H
#include "tstoken.h"
#include "tarray.h"
#include "ttoken.h"
#ifdef __cplusplus
extern "C" {
......
......@@ -2,7 +2,7 @@
#include "tutil.h"
#include "tname.h"
#include "tstoken.h"
#include "ttoken.h"
#include "tvariant.h"
#define VALIDNUMOFCOLS(x) ((x) >= TSDB_MIN_COLUMNS && (x) <= TSDB_MAX_COLUMNS)
......
......@@ -14,14 +14,14 @@
*/
#include "os.h"
#include "tvariant.h"
#include "hash.h"
#include "taos.h"
#include "taosdef.h"
#include "tstoken.h"
#include "ttoken.h"
#include "ttokendef.h"
#include "tutil.h"
#include "ttype.h"
#include "tutil.h"
#include "tvariant.h"
void tVariantCreate(tVariant *pVar, SStrToken *token) {
int32_t ret = 0;
......@@ -49,7 +49,7 @@ void tVariantCreate(tVariant *pVar, SStrToken *token) {
ret = tStrToInteger(token->z, token->type, token->n, &pVar->i64, true);
if (ret != 0) {
SStrToken t = {0};
tSQLGetToken(token->z, &t.type);
tGetToken(token->z, &t.type);
if (t.type == TK_MINUS) { // it is a signed number which is greater than INT64_MAX or less than INT64_MIN
pVar->nType = -1; // -1 means error type
return;
......@@ -460,7 +460,7 @@ static FORCE_INLINE int32_t convertToInteger(tVariant *pVariant, int64_t *result
*result = (int64_t) pVariant->dKey;
} else if (pVariant->nType == TSDB_DATA_TYPE_BINARY) {
SStrToken token = {.z = pVariant->pz, .n = pVariant->nLen};
/*int32_t n = */tSQLGetToken(pVariant->pz, &token.type);
/*int32_t n = */tGetToken(pVariant->pz, &token.type);
if (token.type == TK_NULL) {
if (releaseVariantPtr) {
......@@ -495,10 +495,10 @@ static FORCE_INLINE int32_t convertToInteger(tVariant *pVariant, int64_t *result
wchar_t *endPtr = NULL;
SStrToken token = {0};
token.n = tSQLGetToken(pVariant->pz, &token.type);
token.n = tGetToken(pVariant->pz, &token.type);
if (token.type == TK_MINUS || token.type == TK_PLUS) {
token.n = tSQLGetToken(pVariant->pz + token.n, &token.type);
token.n = tGetToken(pVariant->pz + token.n, &token.type);
}
if (token.type == TK_FLOAT) {
......
......@@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED)
ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME}
POST_BUILD
COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.28-dist.jar ${LIBRARY_OUTPUT_PATH}
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.29.jar ${LIBRARY_OUTPUT_PATH}
COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
COMMENT "build jdbc driver")
ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME})
......
......@@ -5,7 +5,7 @@
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.28</version>
<version>2.0.29</version>
<packaging>jar</packaging>
<name>JDBCDriver</name>
......
......@@ -3,7 +3,7 @@
<modelVersion>4.0.0</modelVersion>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.28</version>
<version>2.0.29</version>
<packaging>jar</packaging>
<name>JDBCDriver</name>
<url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>
......
......@@ -84,10 +84,12 @@ public abstract class AbstractResultSet extends WrapperImpl implements ResultSet
}
@Override
@Deprecated
public InputStream getUnicodeStream(int columnIndex) throws SQLException {
if (isClosed())
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED);
}
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
......@@ -171,6 +173,7 @@ public abstract class AbstractResultSet extends WrapperImpl implements ResultSet
}
@Override
@Deprecated
public InputStream getUnicodeStream(String columnLabel) throws SQLException {
return getUnicodeStream(findColumn(columnLabel));
}
......
......@@ -49,7 +49,7 @@ public class TSDBConnection extends AbstractConnection {
this.databaseMetaData.setConnection(this);
}
public TSDBJNIConnector getConnection() {
public TSDBJNIConnector getConnector() {
return this.connector;
}
......@@ -58,7 +58,7 @@ public class TSDBConnection extends AbstractConnection {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
return new TSDBStatement(this, this.connector);
return new TSDBStatement(this);
}
public TSDBSubscribe subscribe(String topic, String sql, boolean restart) throws SQLException {
......@@ -74,14 +74,18 @@ public class TSDBConnection extends AbstractConnection {
}
public PreparedStatement prepareStatement(String sql) throws SQLException {
if (isClosed())
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
return new TSDBPreparedStatement(this, this.connector, sql);
}
return new TSDBPreparedStatement(this, sql);
}
public void close() throws SQLException {
if (isClosed)
if (isClosed) {
return;
}
this.connector.closeConnection();
this.isClosed = true;
}
......
......@@ -104,7 +104,7 @@ public class TSDBDriver extends AbstractDriver {
static {
try {
java.sql.DriverManager.registerDriver(new TSDBDriver());
DriverManager.registerDriver(new TSDBDriver());
} catch (SQLException e) {
throw TSDBError.createRuntimeException(TSDBErrorNumbers.ERROR_CANNOT_REGISTER_JNI_DRIVER, e);
}
......
......@@ -18,6 +18,7 @@ package com.taosdata.jdbc;
import com.taosdata.jdbc.utils.TaosInfo;
import java.nio.ByteBuffer;
import java.sql.SQLException;
import java.sql.SQLWarning;
import java.util.List;
......@@ -29,10 +30,13 @@ public class TSDBJNIConnector {
private static volatile Boolean isInitialized = false;
private TaosInfo taosInfo = TaosInfo.getInstance();
// Connection pointer used in C
private long taos = TSDBConstants.JNI_NULL_POINTER;
// result set status in current connection
private boolean isResultsetClosed = true;
private boolean isResultsetClosed;
private int affectedRows = -1;
static {
......@@ -75,7 +79,6 @@ public class TSDBJNIConnector {
public boolean connect(String host, int port, String dbName, String user, String password) throws SQLException {
if (this.taos != TSDBConstants.JNI_NULL_POINTER) {
// this.closeConnectionImp(this.taos);
closeConnection();
this.taos = TSDBConstants.JNI_NULL_POINTER;
}
......@@ -97,12 +100,6 @@ public class TSDBJNIConnector {
* @throws SQLException
*/
public long executeQuery(String sql) throws SQLException {
// close previous result set if the user forgets to invoke the
// free method to close previous result set.
// if (!this.isResultsetClosed) {
// freeResultSet(taosResultSetPointer);
// }
Long pSql = 0l;
try {
pSql = this.executeQueryImp(sql.getBytes(TaosGlobalConfig.getCharset()), this.taos);
......@@ -135,6 +132,7 @@ public class TSDBJNIConnector {
// Try retrieving result set for the executed SQL using the current connection pointer.
pSql = this.getResultSetImp(this.taos, pSql);
// if pSql == 0L that means resultset is closed
isResultsetClosed = (pSql == TSDBConstants.JNI_NULL_POINTER);
return pSql;
......@@ -169,37 +167,14 @@ public class TSDBJNIConnector {
private native long isUpdateQueryImp(long connection, long pSql);
/**
* Free resultset operation from C to release resultset pointer by JNI
* Free result set operation from C to release result set pointer by JNI
*/
public int freeResultSet(long pSql) {
int res = TSDBConstants.JNI_SUCCESS;
// if (result != taosResultSetPointer && taosResultSetPointer != TSDBConstants.JNI_NULL_POINTER) {
// throw new RuntimeException("Invalid result set pointer");
// }
// if (taosResultSetPointer != TSDBConstants.JNI_NULL_POINTER) {
res = this.freeResultSetImp(this.taos, pSql);
// taosResultSetPointer = TSDBConstants.JNI_NULL_POINTER;
// }
int res = this.freeResultSetImp(this.taos, pSql);
isResultsetClosed = true;
return res;
}
/**
* Close the open result set which is associated to the current connection. If the result set is already
* closed, return 0 for success.
*/
// public int freeResultSet() {
// int resCode = TSDBConstants.JNI_SUCCESS;
// if (!isResultsetClosed) {
// resCode = this.freeResultSetImp(this.taos, this.taosResultSetPointer);
// taosResultSetPointer = TSDBConstants.JNI_NULL_POINTER;
// isResultsetClosed = true;
// }
// return resCode;
// }
private native int freeResultSetImp(long connection, long result);
/**
......@@ -246,6 +221,7 @@ public class TSDBJNIConnector {
*/
public void closeConnection() throws SQLException {
int code = this.closeConnectionImp(this.taos);
if (code < 0) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL);
} else if (code == 0) {
......@@ -253,6 +229,7 @@ public class TSDBJNIConnector {
} else {
throw new SQLException("Undefined error code returned by TDengine when closing a connection");
}
// invoke closeConnectionImpl only here
taosInfo.connect_close_increment();
}
......@@ -289,7 +266,7 @@ public class TSDBJNIConnector {
private native void unsubscribeImp(long subscription, boolean isKeep);
/**
* Validate if a <I>create table</I> sql statement is correct without actually creating that table
* Validate if a <I>create table</I> SQL statement is correct without actually creating that table
*/
public boolean validateCreateTableSql(String sql) {
int res = validateCreateTableSqlImp(taos, sql.getBytes());
......@@ -297,4 +274,66 @@ public class TSDBJNIConnector {
}
private native int validateCreateTableSqlImp(long connection, byte[] sqlBytes);
public long prepareStmt(String sql) throws SQLException {
Long stmt = 0L;
try {
stmt = prepareStmtImp(sql.getBytes(), this.taos);
} catch (Exception e) {
e.printStackTrace();
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_ENCODING);
}
if (stmt == TSDBConstants.JNI_CONNECTION_NULL) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL);
}
if (stmt == TSDBConstants.JNI_SQL_NULL) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_SQL_NULL);
}
if (stmt == TSDBConstants.JNI_OUT_OF_MEMORY) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_OUT_OF_MEMORY);
}
return stmt;
}
private native long prepareStmtImp(byte[] sql, long con);
public void setBindTableName(long stmt, String tableName) throws SQLException {
int code = setBindTableNameImp(stmt, tableName, this.taos);
if (code != TSDBConstants.JNI_SUCCESS) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "failed to set table name");
}
}
private native int setBindTableNameImp(long stmt, String name, long conn);
public void bindColumnDataArray(long stmt, ByteBuffer colDataList, ByteBuffer lengthList, ByteBuffer isNullList, int type, int bytes, int numOfRows,int columnIndex) throws SQLException {
int code = bindColDataImp(stmt, colDataList.array(), lengthList.array(), isNullList.array(), type, bytes, numOfRows, columnIndex, this.taos);
if (code != TSDBConstants.JNI_SUCCESS) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "failed to bind column data");
}
}
private native int bindColDataImp(long stmt, byte[] colDataList, byte[] lengthList, byte[] isNullList, int type, int bytes, int numOfRows, int columnIndex, long conn);
public void executeBatch(long stmt) throws SQLException {
int code = executeBatchImp(stmt, this.taos);
if (code != TSDBConstants.JNI_SUCCESS) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "failed to execute batch bind");
}
}
private native int executeBatchImp(long stmt, long con);
public void closeBatch(long stmt) throws SQLException {
int code = closeStmt(stmt, this.taos);
if (code != TSDBConstants.JNI_SUCCESS) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "failed to close batch bind");
}
}
private native int closeStmt(long stmt, long con);
}
......@@ -18,33 +18,40 @@ import com.taosdata.jdbc.utils.Utils;
import java.io.InputStream;
import java.io.Reader;
import java.io.UnsupportedEncodingException;
import java.math.BigDecimal;
import java.net.URL;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.sql.*;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Collections;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/*
* TDengine only supports a subset of the standard SQL, thus this implemetation of the
* TDengine only supports a subset of the standard SQL, thus this implementation of the
* standard JDBC API contains more or less some adjustments customized for certain
* compatibility needs.
*/
public class TSDBPreparedStatement extends TSDBStatement implements PreparedStatement {
private String rawSql;
private Object[] parameters;
private boolean isPrepared;
private ArrayList<ColumnInfo> colData;
private String tableName;
private long nativeStmtHandle = 0;
private volatile TSDBParameterMetaData parameterMetaData;
TSDBPreparedStatement(TSDBConnection connection, TSDBJNIConnector connecter, String sql) {
super(connection, connecter);
TSDBPreparedStatement(TSDBConnection connection, String sql) {
super(connection);
init(sql);
int parameterCnt = 0;
if (sql.contains("?")) {
int parameterCnt = 0;
for (int i = 0; i < sql.length(); i++) {
if ('?' == sql.charAt(i)) {
parameterCnt++;
......@@ -53,6 +60,12 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
parameters = new Object[parameterCnt];
this.isPrepared = true;
}
if (parameterCnt > 1) {
// the table name is also a parameter, so ignore it.
this.colData = new ArrayList<ColumnInfo>(parameterCnt - 1);
this.colData.addAll(Collections.nCopies(parameterCnt - 1, null));
}
}
private void init(String sql) {
......@@ -260,10 +273,14 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
@Override
public void setObject(int parameterIndex, Object x) throws SQLException {
if (isClosed())
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
if (parameterIndex < 1 && parameterIndex >= parameters.length)
}
if (parameterIndex < 1 && parameterIndex >= parameters.length) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE);
}
parameters[parameterIndex - 1] = x;
}
......@@ -300,9 +317,10 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
@Override
public void setRef(int parameterIndex, Ref x) throws SQLException {
if (isClosed())
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
}
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
......@@ -515,4 +533,276 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
///////////////////////////////////////////////////////////////////////
// NOTE: the following APIs are not JDBC compatible
// set the bind table name
private static class ColumnInfo {
@SuppressWarnings("rawtypes")
private ArrayList data;
private int type;
private int bytes;
private boolean typeIsSet;
public ColumnInfo() {
this.typeIsSet = false;
}
public void setType(int type) throws SQLException {
if (this.isTypeSet()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "column data type has been set");
}
this.typeIsSet = true;
this.type = type;
}
public boolean isTypeSet() {
return this.typeIsSet;
}
};
public void setTableName(String name) {
this.tableName = name;
}
public <T> void setValueImpl(int columnIndex, ArrayList<T> list, int type, int bytes) throws SQLException {
ColumnInfo col = (ColumnInfo) this.colData.get(columnIndex);
if (col == null) {
ColumnInfo p = new ColumnInfo();
p.setType(type);
p.bytes = bytes;
p.data = (ArrayList<?>) list.clone();
this.colData.set(columnIndex, p);
} else {
if (col.type != type) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "column data type mismatch");
}
col.data.addAll(list);
}
}
public void setInt(int columnIndex, ArrayList<Integer> list) throws SQLException {
setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_INT, Integer.BYTES);
}
public void setFloat(int columnIndex, ArrayList<Float> list) throws SQLException {
setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_FLOAT, Float.BYTES);
}
public void setTimestamp(int columnIndex, ArrayList<Long> list) throws SQLException {
setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP, Long.BYTES);
}
public void setLong(int columnIndex, ArrayList<Long> list) throws SQLException {
setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_BIGINT, Long.BYTES);
}
public void setDouble(int columnIndex, ArrayList<Double> list) throws SQLException {
setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_DOUBLE, Double.BYTES);
}
public void setBoolean(int columnIndex, ArrayList<Boolean> list) throws SQLException {
setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_BOOL, Byte.BYTES);
}
public void setByte(int columnIndex, ArrayList<Byte> list) throws SQLException {
setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_TINYINT, Byte.BYTES);
}
public void setShort(int columnIndex, ArrayList<Short> list) throws SQLException {
setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_SMALLINT, Short.BYTES);
}
public void setString(int columnIndex, ArrayList<String> list, int size) throws SQLException {
setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_BINARY, size);
}
// note: expand the required space for each NChar character
public void setNString(int columnIndex, ArrayList<String> list, int size) throws SQLException {
setValueImpl(columnIndex, list, TSDBConstants.TSDB_DATA_TYPE_NCHAR, size * Integer.BYTES);
}
public void columnDataAddBatch() throws SQLException {
// pass the data block to native code
if (rawSql == null) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "sql statement not set yet");
}
// table name is not set yet, abort
if (this.tableName == null) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "table name not set yet");
}
int numOfCols = this.colData.size();
if (numOfCols == 0) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "column data not bind");
}
TSDBJNIConnector connector = ((TSDBConnection) this.getConnection()).getConnector();
this.nativeStmtHandle = connector.prepareStmt(rawSql);
connector.setBindTableName(this.nativeStmtHandle, this.tableName);
ColumnInfo colInfo = (ColumnInfo) this.colData.get(0);
if (colInfo == null) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "column data not bind");
}
int rows = colInfo.data.size();
for (int i = 0; i < numOfCols; ++i) {
ColumnInfo col1 = this.colData.get(i);
if (col1 == null || !col1.isTypeSet()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "column data not bind");
}
if (rows != col1.data.size()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "the rows in column data not identical");
}
ByteBuffer colDataList = ByteBuffer.allocate(rows * col1.bytes);
colDataList.order(ByteOrder.LITTLE_ENDIAN);
ByteBuffer lengthList = ByteBuffer.allocate(rows * Integer.BYTES);
lengthList.order(ByteOrder.LITTLE_ENDIAN);
ByteBuffer isNullList = ByteBuffer.allocate(rows * Byte.BYTES);
isNullList.order(ByteOrder.LITTLE_ENDIAN);
switch (col1.type) {
case TSDBConstants.TSDB_DATA_TYPE_INT: {
for (int j = 0; j < rows; ++j) {
Integer val = (Integer) col1.data.get(j);
colDataList.putInt(val == null? Integer.MIN_VALUE:val);
isNullList.put((byte) (val == null? 1:0));
}
break;
}
case TSDBConstants.TSDB_DATA_TYPE_TINYINT: {
for (int j = 0; j < rows; ++j) {
Byte val = (Byte) col1.data.get(j);
colDataList.put(val == null? 0:val);
isNullList.put((byte) (val == null? 1:0));
}
break;
}
case TSDBConstants.TSDB_DATA_TYPE_BOOL: {
for (int j = 0; j < rows; ++j) {
Boolean val = (Boolean) col1.data.get(j);
if (val == null) {
colDataList.put((byte) 0);
} else {
colDataList.put((byte) (val? 1:0));
}
isNullList.put((byte) (val == null? 1:0));
}
break;
}
case TSDBConstants.TSDB_DATA_TYPE_SMALLINT: {
for (int j = 0; j < rows; ++j) {
Short val = (Short) col1.data.get(j);
colDataList.putShort(val == null? 0:val);
isNullList.put((byte) (val == null? 1:0));
}
break;
}
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP:
case TSDBConstants.TSDB_DATA_TYPE_BIGINT: {
for (int j = 0; j < rows; ++j) {
Long val = (Long) col1.data.get(j);
colDataList.putLong(val == null? 0:val);
isNullList.put((byte) (val == null? 1:0));
}
break;
}
case TSDBConstants.TSDB_DATA_TYPE_FLOAT: {
for (int j = 0; j < rows; ++j) {
Float val = (Float) col1.data.get(j);
colDataList.putFloat(val == null? 0:val);
isNullList.put((byte) (val == null? 1:0));
}
break;
}
case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: {
for (int j = 0; j < rows; ++j) {
Double val = (Double) col1.data.get(j);
colDataList.putDouble(val == null? 0:val);
isNullList.put((byte) (val == null? 1:0));
}
break;
}
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
case TSDBConstants.TSDB_DATA_TYPE_BINARY: {
String charset = TaosGlobalConfig.getCharset();
for (int j = 0; j < rows; ++j) {
String val = (String) col1.data.get(j);
colDataList.position(j * col1.bytes); // seek to the correct position
if (val != null) {
byte[] b = null;
try {
if (col1.type == TSDBConstants.TSDB_DATA_TYPE_BINARY) {
b = val.getBytes();
} else {
b = val.getBytes(charset);
}
} catch (UnsupportedEncodingException e) {
e.printStackTrace();
}
if (val.length() > col1.bytes) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "string data too long");
}
colDataList.put(b);
lengthList.putInt(b.length);
isNullList.put((byte) 0);
} else {
lengthList.putInt(0);
isNullList.put((byte) 1);
}
}
break;
}
case TSDBConstants.TSDB_DATA_TYPE_UTINYINT:
case TSDBConstants.TSDB_DATA_TYPE_USMALLINT:
case TSDBConstants.TSDB_DATA_TYPE_UINT:
case TSDBConstants.TSDB_DATA_TYPE_UBIGINT: {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "not support data types");
}
};
connector.bindColumnDataArray(this.nativeStmtHandle, colDataList, lengthList, isNullList, col1.type, col1.bytes, rows, i);
}
}
public void columnDataExecuteBatch() throws SQLException {
TSDBJNIConnector connector = ((TSDBConnection) this.getConnection()).getConnector();
connector.executeBatch(this.nativeStmtHandle);
this.columnDataClearBatch();
}
public void columnDataClearBatch() {
int size = this.colData.size();
this.colData.clear();
this.colData.addAll(Collections.nCopies(size, null));
this.tableName = null; // clear the table name
}
public void columnDataCloseBatch() throws SQLException {
TSDBJNIConnector connector = ((TSDBConnection) this.getConnection()).getConnector();
connector.closeBatch(this.nativeStmtHandle);
this.nativeStmtHandle = 0L;
this.tableName = null;
}
}
......@@ -109,6 +109,8 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet {
public void close() throws SQLException {
if (isClosed)
return;
if (this.statement == null)
return;
if (this.jniConnector != null) {
int code = this.jniConnector.freeResultSet(this.resultSetPointer);
if (code == TSDBConstants.JNI_CONNECTION_NULL) {
......@@ -461,12 +463,13 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet {
}
public boolean isClosed() throws SQLException {
if (isClosed)
return true;
if (jniConnector != null) {
isClosed = jniConnector.isResultsetClosed();
}
return isClosed;
// if (isClosed)
// return true;
// if (jniConnector != null) {
// isClosed = jniConnector.isResultsetClosed();
// }
// return isClosed;
}
public String getNString(int columnIndex) throws SQLException {
......
......@@ -29,6 +29,8 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import com.taosdata.jdbc.utils.NullType;
public class TSDBResultSetBlockData {
private int numOfRows = 0;
private int rowIndex = 0;
......@@ -164,59 +166,7 @@ public class TSDBResultSetBlockData {
}
}
private static class NullType {
private static final byte NULL_BOOL_VAL = 0x2;
private static final String NULL_STR = "null";
public String toString() {
return NullType.NULL_STR;
}
public static boolean isBooleanNull(byte val) {
return val == NullType.NULL_BOOL_VAL;
}
private static boolean isTinyIntNull(byte val) {
return val == Byte.MIN_VALUE;
}
private static boolean isSmallIntNull(short val) {
return val == Short.MIN_VALUE;
}
private static boolean isIntNull(int val) {
return val == Integer.MIN_VALUE;
}
private static boolean isBigIntNull(long val) {
return val == Long.MIN_VALUE;
}
private static boolean isFloatNull(float val) {
return Float.isNaN(val);
}
private static boolean isDoubleNull(double val) {
return Double.isNaN(val);
}
private static boolean isBinaryNull(byte[] val, int length) {
if (length != Byte.BYTES) {
return false;
}
return val[0] == 0xFF;
}
private static boolean isNcharNull(byte[] val, int length) {
if (length != Integer.BYTES) {
return false;
}
return (val[0] & val[1] & val[2] & val[3]) == 0xFF;
}
}
/**
* The original type may not be a string type, but will be converted to by
......@@ -488,8 +438,8 @@ public class TSDBResultSetBlockData {
}
try {
String ss = TaosGlobalConfig.getCharset();
return new String(dest, ss);
String charset = TaosGlobalConfig.getCharset();
return new String(dest, charset);
} catch (UnsupportedEncodingException e) {
e.printStackTrace();
}
......
......@@ -84,7 +84,8 @@ public class TSDBResultSetRowData {
data.set(col, value);
}
public int getInt(int col, int srcType) throws SQLException {
@SuppressWarnings("deprecation")
public int getInt(int col, int srcType) throws SQLException {
Object obj = data.get(col);
switch (srcType) {
......@@ -128,7 +129,7 @@ public class TSDBResultSetRowData {
long value = (long) obj;
if (value < 0)
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_NUMERIC_VALUE_OUT_OF_RANGE);
return new Long(value).intValue();
return Long.valueOf(value).intValue();
}
}
......
......@@ -19,8 +19,6 @@ import java.sql.ResultSet;
import java.sql.SQLException;
public class TSDBStatement extends AbstractStatement {
private TSDBJNIConnector connector;
/**
* Status of current statement
*/
......@@ -29,29 +27,26 @@ public class TSDBStatement extends AbstractStatement {
private TSDBConnection connection;
private TSDBResultSet resultSet;
public void setConnection(TSDBConnection connection) {
this.connection = connection;
}
TSDBStatement(TSDBConnection connection, TSDBJNIConnector connector) {
TSDBStatement(TSDBConnection connection) {
this.connection = connection;
this.connector = connector;
}
public ResultSet executeQuery(String sql) throws SQLException {
// check if closed
if (isClosed())
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
}
//TODO: if an insert statement is executed via executeQuery, the SQL runs first and only afterwards is pSql checked to see whether it is an insert; by then the insert has already succeeded
// execute query
long pSql = this.connector.executeQuery(sql);
long pSql = this.connection.getConnector().executeQuery(sql);
// if pSql is create/insert/update/delete/alter SQL
if (this.connector.isUpdateQuery(pSql)) {
this.connector.freeResultSet(pSql);
if (this.connection.getConnector().isUpdateQuery(pSql)) {
this.connection.getConnector().freeResultSet(pSql);
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_WITH_EXECUTEQUERY);
}
TSDBResultSet res = new TSDBResultSet(this, this.connector, pSql);
TSDBResultSet res = new TSDBResultSet(this, this.connection.getConnector(), pSql);
res.setBatchFetch(this.connection.getBatchFetch());
return res;
}
......@@ -60,14 +55,14 @@ public class TSDBStatement extends AbstractStatement {
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
long pSql = this.connector.executeQuery(sql);
long pSql = this.connection.getConnector().executeQuery(sql);
// if pSql is create/insert/update/delete/alter SQL
if (!this.connector.isUpdateQuery(pSql)) {
this.connector.freeResultSet(pSql);
if (!this.connection.getConnector().isUpdateQuery(pSql)) {
this.connection.getConnector().freeResultSet(pSql);
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_WITH_EXECUTEUPDATE);
}
int affectedRows = this.connector.getAffectedRows(pSql);
this.connector.freeResultSet(pSql);
int affectedRows = this.connection.getConnector().getAffectedRows(pSql);
this.connection.getConnector().freeResultSet(pSql);
return affectedRows;
}
......@@ -81,30 +76,29 @@ public class TSDBStatement extends AbstractStatement {
public boolean execute(String sql) throws SQLException {
// check if closed
if (isClosed())
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
}
// execute query
long pSql = this.connector.executeQuery(sql);
long pSql = this.connection.getConnector().executeQuery(sql);
// if pSql is create/insert/update/delete/alter SQL
if (this.connector.isUpdateQuery(pSql)) {
this.affectedRows = this.connector.getAffectedRows(pSql);
this.connector.freeResultSet(pSql);
if (this.connection.getConnector().isUpdateQuery(pSql)) {
this.affectedRows = this.connection.getConnector().getAffectedRows(pSql);
this.connection.getConnector().freeResultSet(pSql);
return false;
}
this.resultSet = new TSDBResultSet(this, this.connector, pSql);
this.resultSet = new TSDBResultSet(this, this.connection.getConnector(), pSql);
this.resultSet.setBatchFetch(this.connection.getBatchFetch());
return true;
}
public ResultSet getResultSet() throws SQLException {
if (isClosed())
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
// long resultSetPointer = connector.getResultSet();
// TSDBResultSet resSet = null;
// if (resultSetPointer != TSDBConstants.JNI_NULL_POINTER) {
// resSet = new TSDBResultSet(connector, resultSetPointer);
// }
}
return this.resultSet;
}
......@@ -115,12 +109,20 @@ public class TSDBStatement extends AbstractStatement {
}
public Connection getConnection() throws SQLException {
if (isClosed())
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
if (this.connector == null)
}
if (this.connection.getConnector() == null) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL);
}
return this.connection;
}
public void setConnection(TSDBConnection connection) {
this.connection = connection;
}
public boolean isClosed() throws SQLException {
return isClosed;
......
......@@ -17,7 +17,7 @@ public class RestfulDriver extends AbstractDriver {
static {
try {
java.sql.DriverManager.registerDriver(new RestfulDriver());
DriverManager.registerDriver(new RestfulDriver());
} catch (SQLException e) {
throw TSDBError.createRuntimeException(TSDBErrorNumbers.ERROR_URL_NOT_SET, e);
}
......
package com.taosdata.jdbc.rs;
import com.google.common.collect.Range;
import com.google.common.collect.RangeSet;
import com.google.common.collect.TreeRangeSet;
import com.taosdata.jdbc.TSDBError;
import com.taosdata.jdbc.TSDBErrorNumbers;
import com.taosdata.jdbc.utils.SqlSyntaxValidator;
import com.taosdata.jdbc.utils.Utils;
import java.io.InputStream;
import java.io.Reader;
import java.math.BigDecimal;
import java.net.URL;
import java.nio.charset.Charset;
import java.sql.*;
import java.util.Calendar;
import java.util.HashMap;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
public class RestfulPreparedStatement extends RestfulStatement implements PreparedStatement {
......
package com.taosdata.jdbc.utils;
public class NullType {
private static final byte NULL_BOOL_VAL = 0x2;
private static final String NULL_STR = "null";
public String toString() {
return NullType.NULL_STR;
}
public static boolean isBooleanNull(byte val) {
return val == NullType.NULL_BOOL_VAL;
}
public static boolean isTinyIntNull(byte val) {
return val == Byte.MIN_VALUE;
}
public static boolean isSmallIntNull(short val) {
return val == Short.MIN_VALUE;
}
public static boolean isIntNull(int val) {
return val == Integer.MIN_VALUE;
}
public static boolean isBigIntNull(long val) {
return val == Long.MIN_VALUE;
}
public static boolean isFloatNull(float val) {
return Float.isNaN(val);
}
public static boolean isDoubleNull(double val) {
return Double.isNaN(val);
}
public static boolean isBinaryNull(byte[] val, int length) {
if (length != Byte.BYTES) {
return false;
}
// mask to an unsigned value; a signed byte would never compare equal to 0xFF
return (val[0] & 0xFF) == 0xFF;
}
public static boolean isNcharNull(byte[] val, int length) {
if (length != Integer.BYTES) {
return false;
}
return (val[0] & val[1] & val[2] & val[3] & 0xFF) == 0xFF;
}
public static byte getBooleanNull() {
return NullType.NULL_BOOL_VAL;
}
public static byte getTinyintNull() {
return Byte.MIN_VALUE;
}
public static int getIntNull() {
return Integer.MIN_VALUE;
}
public static short getSmallIntNull() {
return Short.MIN_VALUE;
}
public static long getBigIntNull() {
return Long.MIN_VALUE;
}
public static int getFloatNull() {
return 0x7FF00000;
}
public static long getDoubleNull() {
return 0x7FFFFF0000000000L;
}
public static byte getBinaryNull() {
return (byte) 0xFF;
}
public static byte[] getNcharNull() {
return new byte[] {(byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF};
}
}
......@@ -67,7 +67,7 @@ public class Utils {
findPlaceholderPosition(preparedSql, placeholderPositions);
findClauseRangeSet(preparedSql, clause, clauseRangeSet);
return transformSql(preparedSql, parameters, placeholderPositions, clauseRangeSet);
return transformSql(rawSql, parameters, placeholderPositions, clauseRangeSet);
}
private static void findClauseRangeSet(String preparedSql, String[] regexArr, RangeSet<Integer> clauseRangeSet) {
......@@ -95,14 +95,15 @@ public class Utils {
/***
*
* @param preparedSql
* @param rawSql
* @param paramArr
* @param placeholderPosition
* @param clauseRangeSet
* @return
*/
private static String transformSql(String preparedSql, Object[] paramArr, Map<Integer, Integer> placeholderPosition, RangeSet<Integer> clauseRangeSet) {
String[] sqlArr = preparedSql.split("\\?");
private static String transformSql(String rawSql, Object[] paramArr, Map<Integer, Integer> placeholderPosition, RangeSet<Integer> clauseRangeSet) {
String[] sqlArr = rawSql.split("\\?");
return IntStream.range(0, sqlArr.length).mapToObj(index -> {
if (index == paramArr.length)
return sqlArr[index];
......
package com.taosdata.jdbc;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
......@@ -12,67 +13,74 @@ import java.util.Properties;
import java.util.concurrent.TimeUnit;
public class SubscribeTest {
Connection connection;
Statement statement;
String dbName = "test";
String tName = "t0";
String host = "127.0.0.1";
String topic = "test";
private long ts;
@Before
public void createDatabase() {
@Test
public void subscribe() {
try {
Class.forName("com.taosdata.jdbc.TSDBDriver");
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
String rawSql = "select * from " + dbName + "." + tName + ";";
TSDBConnection conn = connection.unwrap(TSDBConnection.class);
TSDBSubscribe subscribe = conn.subscribe(topic, rawSql, false);
for (int j = 0; j < 10; j++) {
TimeUnit.SECONDS.sleep(1);
TSDBResultSet resSet = subscribe.consume();
statement = connection.createStatement();
statement.execute("drop database if exists " + dbName);
statement.execute("create database if not exists " + dbName);
statement.execute("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)");
long ts = System.currentTimeMillis();
for (int i = 0; i < 2; i++) {
ts += i;
String sql = "insert into " + dbName + "." + tName + " values (" + ts + ", " + (100 + i) + ", " + i + ")";
statement.executeUpdate(sql);
int rowCnt = 0;
while (resSet.next()) {
if (rowCnt == 0) {
long cur_ts = resSet.getTimestamp(1).getTime();
int k = resSet.getInt(2);
int v = resSet.getInt(3);
Assert.assertEquals(ts, cur_ts);
Assert.assertEquals(100, k);
Assert.assertEquals(1, v);
}
if (rowCnt == 1) {
long cur_ts = resSet.getTimestamp(1).getTime();
int k = resSet.getInt(2);
int v = resSet.getInt(3);
Assert.assertEquals(ts + 1, cur_ts);
Assert.assertEquals(101, k);
Assert.assertEquals(2, v);
}
rowCnt++;
}
if (j == 0)
Assert.assertEquals(2, rowCnt);
resSet.close();
}
subscribe.close(true);
} catch (ClassNotFoundException | SQLException e) {
return;
} catch (SQLException | InterruptedException throwables) {
throwables.printStackTrace();
}
}
@Test
public void subscribe() {
try {
String rawSql = "select * from " + dbName + "." + tName + ";";
System.out.println(rawSql);
// TSDBSubscribe subscribe = ((TSDBConnection) connection).subscribe(topic, rawSql, false);
@Before
public void createDatabase() throws SQLException {
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
// int a = 0;
// while (true) {
// TimeUnit.MILLISECONDS.sleep(1000);
// TSDBResultSet resSet = subscribe.consume();
// while (resSet.next()) {
// for (int i = 1; i <= resSet.getMetaData().getColumnCount(); i++) {
// System.out.printf(i + ": " + resSet.getString(i) + "\t");
// }
// System.out.println("\n======" + a + "==========");
// }
// a++;
// if (a >= 2) {
// break;
// }
// resSet.close();
// }
//
// subscribe.close(true);
} catch (Exception e) {
e.printStackTrace();
}
statement = connection.createStatement();
statement.execute("drop database if exists " + dbName);
statement.execute("create database if not exists " + dbName);
statement.execute("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)");
ts = System.currentTimeMillis();
statement.executeUpdate("insert into " + dbName + "." + tName + " values (" + ts + ", 100, 1)");
statement.executeUpdate("insert into " + dbName + "." + tName + " values (" + (ts + 1) + ", 101, 2)");
}
@After
......@@ -86,6 +94,5 @@ public class SubscribeTest {
} catch (SQLException e) {
e.printStackTrace();
}
}
}
\ No newline at end of file
......@@ -3,7 +3,6 @@ package com.taosdata.jdbc;
import com.google.common.primitives.Ints;
import com.google.common.primitives.Longs;
import com.google.common.primitives.Shorts;
import com.taosdata.jdbc.rs.RestfulResultSet;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
......@@ -177,7 +176,8 @@ public class TSDBResultSetTest {
rs.getAsciiStream("f1");
}
@Test(expected = SQLFeatureNotSupportedException.class)
@SuppressWarnings("deprecation")
@Test(expected = SQLFeatureNotSupportedException.class)
public void getUnicodeStream() throws SQLException {
rs.getUnicodeStream("f1");
}
......@@ -326,7 +326,7 @@ public class TSDBResultSetTest {
@Test(expected = SQLFeatureNotSupportedException.class)
public void getRow() throws SQLException {
int row = rs.getRow();
rs.getRow();
}
@Test(expected = SQLFeatureNotSupportedException.class)
......@@ -405,12 +405,12 @@ public class TSDBResultSetTest {
@Test(expected = SQLFeatureNotSupportedException.class)
public void updateByte() throws SQLException {
rs.updateByte(1, new Byte("0"));
rs.updateByte(1, (byte) 0);
}
@Test(expected = SQLFeatureNotSupportedException.class)
public void updateShort() throws SQLException {
rs.updateShort(1, new Short("0"));
rs.updateShort(1, (short) 0);
}
@Test(expected = SQLFeatureNotSupportedException.class)
......
......@@ -12,7 +12,7 @@ public class InsertSpecialCharacterJniTest {
private static String tbname1 = "test";
private static String tbname2 = "weather";
private static String special_character_str_1 = "$asd$$fsfsf$";
private static String special_character_str_2 = "\\asdfsfsf\\\\";
private static String special_character_str_2 = "\\\\asdfsfsf\\\\";
private static String special_character_str_3 = "\\\\asdfsfsf\\";
private static String special_character_str_4 = "?asd??fsf?sf?";
private static String special_character_str_5 = "?#sd@$f(('<(s[P)>\"){]}f?s[]{}%vaew|\"fsfs^a&d*jhg)(j))(f@~!?$";
......@@ -70,7 +70,7 @@ public class InsertSpecialCharacterJniTest {
String f1 = new String(rs.getBytes(2));
//TODO: bug to be fixed
// Assert.assertEquals(special_character_str_2, f1);
Assert.assertEquals(special_character_str_2.substring(0, special_character_str_1.length() - 2), f1);
Assert.assertEquals(special_character_str_2.substring(1, special_character_str_1.length() - 1), f1);
String f2 = rs.getString(3);
Assert.assertNull(f2);
}
......@@ -345,6 +345,32 @@ public class InsertSpecialCharacterJniTest {
}
}
@Test
public void testCase12() throws SQLException {
final long now = System.currentTimeMillis();
// insert
final String sql = "insert into " + tbname1 + "(ts, f1, f2) values(?, 'HelloTDengine', ?) ; ";
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
pstmt.setTimestamp(1, new Timestamp(now));
pstmt.setString(2, special_character_str_4);
int ret = pstmt.executeUpdate();
Assert.assertEquals(1, ret);
}
// query
final String query = "select * from " + tbname1;
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery(query);
rs.next();
long timestamp = rs.getTimestamp(1).getTime();
Assert.assertEquals(now, timestamp);
String f1 = new String(rs.getBytes(2));
Assert.assertEquals("HelloTDengine", f1);
String f2 = rs.getString(3);
Assert.assertEquals(special_character_str_4, f2);
}
}
@Before
public void before() throws SQLException {
try (Statement stmt = conn.createStatement()) {
......
......@@ -13,7 +13,7 @@ public class InsertSpecialCharacterRestfulTest {
private static String tbname1 = "test";
private static String tbname2 = "weather";
private static String special_character_str_1 = "$asd$$fsfsf$";
private static String special_character_str_2 = "\\asdfsfsf\\\\";
private static String special_character_str_2 = "\\\\asdfsfsf\\\\";
private static String special_character_str_3 = "\\\\asdfsfsf\\";
private static String special_character_str_4 = "?asd??fsf?sf?";
private static String special_character_str_5 = "?#sd@$f(('<(s[P)>\"){]}f?s[]{}%vaew|\"fsfs^a&d*jhg)(j))(f@~!?$";
......@@ -49,7 +49,7 @@ public class InsertSpecialCharacterRestfulTest {
@Test
public void testCase02() throws SQLException {
//TODO:
// Expected :\asdfsfsf\\
// Expected :\asdfsfsf\
// Actual :\asdfsfsf\
final long now = System.currentTimeMillis();
......@@ -71,7 +71,7 @@ public class InsertSpecialCharacterRestfulTest {
String f1 = new String(rs.getBytes(2));
//TODO: bug to be fixed
// Assert.assertEquals(special_character_str_2, f1);
Assert.assertEquals(special_character_str_2.substring(0, special_character_str_1.length() - 2), f1);
Assert.assertEquals(special_character_str_2.substring(1, special_character_str_1.length() - 1), f1);
String f2 = rs.getString(3);
Assert.assertNull(f2);
}
......@@ -346,6 +346,31 @@ public class InsertSpecialCharacterRestfulTest {
}
}
@Test
public void testCase12() throws SQLException {
final long now = System.currentTimeMillis();
// insert
final String sql = "insert into " + tbname1 + "(ts, f1, f2) values(?, 'HelloTDengine', ?) ; ";
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
pstmt.setTimestamp(1, new Timestamp(now));
pstmt.setString(2, special_character_str_4);
int ret = pstmt.executeUpdate();
Assert.assertEquals(1, ret);
}
// query
final String query = "select * from " + tbname1;
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery(query);
rs.next();
long timestamp = rs.getTimestamp(1).getTime();
Assert.assertEquals(now, timestamp);
String f1 = new String(rs.getBytes(2));
Assert.assertEquals("HelloTDengine", f1);
String f2 = rs.getString(3);
Assert.assertEquals(special_character_str_4, f2);
}
}
@Before
public void before() throws SQLException {
try (Statement stmt = conn.createStatement()) {
......
# Created by https://www.toptal.com/developers/gitignore/api/python
# Edit at https://www.toptal.com/developers/gitignore?templates=python
### Python ###
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
pytestdebug.log
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
doc/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
.python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
#poetry.lock
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
# .env
.env/
.venv/
env/
venv/
ENV/
env.bak/
venv.bak/
pythonenv*
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# operating system-related files
# file properties cache/storage on macOS
*.DS_Store
# thumbnail cache on Windows
Thumbs.db
# profiling data
.prof
# End of https://www.toptal.com/developers/gitignore/api/python
# TDengine Connector for Python
The [TDengine] connector for Python enables Python programs to access TDengine through an API compliant with the Python DB API 2.0 (PEP-249). It uses the TDengine C client library for client/server communication.
## Install
```sh
pip install git+https://github.com/taosdata/TDengine-connector-python
```
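## Usage

A minimal query sketch, assuming a TDengine server is reachable on `127.0.0.1` with the default `root`/`taosdata` credentials and an existing `log` database:

```python
import taos

# connect() accepts host/user/password/database keyword arguments
conn = taos.connect(host='127.0.0.1', user='root', password='taosdata', database='log')
cursor = conn.cursor()
cursor.execute("select * from log.log limit 10")
for row in cursor:
    print(row)
cursor.close()
conn.close()
```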
## Source Code
[TDengine] connector for Python source code is hosted on [GitHub](https://github.com/taosdata/TDengine-connector-python).
## License - AGPL
Licensed under the same terms as [TDengine](https://github.com/taosdata/TDengine).
import taos
conn = taos.connect(host='127.0.0.1',
user='root',
password='taosdata',
database='log')
cursor = conn.cursor()
sql = "select * from log.log limit 10"
cursor.execute(sql)
for row in cursor:
print(row)
../
\ No newline at end of file
# TDengine python client interface
\ No newline at end of file
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="taos",
version="2.0.9",
author="Taosdata Inc.",
author_email="support@taosdata.com",
description="TDengine python client package",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/pypa/sampleproject",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 2",
"Operating System :: Linux",
],
)
from .cinterface import CTaosInterface
from .error import *
from .constants import FieldType
class TDengineCursor(object):
"""Database cursor which is used to manage the context of a fetch operation.
Attributes:
.description: Read-only attribute consists of 7-item sequences:
> name (mandatory)
> type_code (mandatory)
> display_size
> internal_size
> precision
> scale
> null_ok
This attribute will be None for operations that do not return rows or
if the cursor has not had an operation invoked via the .execute*() method yet.
.rowcount: This read-only attribute specifies the number of rows that the last
.execute*() produced (for DQL statements like SELECT) or affected
(for DML statements like UPDATE or INSERT).
"""
def __init__(self, connection=None):
self._description = []
self._rowcount = -1
self._connection = None
self._result = None
self._fields = None
self._block = None
self._block_rows = -1
self._block_iter = 0
self._affected_rows = 0
self._logfile = ""
if connection is not None:
self._connection = connection
def __iter__(self):
return self
def next(self):
if self._result is None or self._fields is None:
raise OperationalError("Invalid use of fetch iterator")
if self._block_rows <= self._block_iter:
block, self._block_rows = CTaosInterface.fetchRow(
self._result, self._fields)
if self._block_rows == 0:
raise StopIteration
self._block = list(map(tuple, zip(*block)))
self._block_iter = 0
data = self._block[self._block_iter]
self._block_iter += 1
return data
@property
def description(self):
"""Return the description of the object.
"""
return self._description
@property
def rowcount(self):
"""Return the rowcount of the object
"""
return self._rowcount
@property
def affected_rows(self):
"""Return the affected_rows of the object
"""
return self._affected_rows
def callproc(self, procname, *args):
"""Call a stored database procedure with the given name.
Void functionality since no stored procedures.
"""
pass
def log(self, logfile):
self._logfile = logfile
def close(self):
"""Close the cursor.
"""
if self._connection is None:
return False
self._reset_result()
self._connection = None
return True
def execute(self, operation, params=None):
"""Prepare and execute a database operation (query or command).
"""
if not operation:
return None
if not self._connection:
# TODO : change the exception raised here
raise ProgrammingError("Cursor is not connected")
self._reset_result()
stmt = operation
if params is not None:
pass
# global querySeqNum
# querySeqNum += 1
# localSeqNum = querySeqNum # avoid race condition
# print(" >> Exec Query ({}): {}".format(localSeqNum, str(stmt)))
self._result = CTaosInterface.query(self._connection._conn, stmt)
# print(" << Query ({}) Exec Done".format(localSeqNum))
if (self._logfile):
with open(self._logfile, "a") as logfile:
logfile.write("%s;\n" % operation)
errno = CTaosInterface.libtaos.taos_errno(self._result)
if errno == 0:
if CTaosInterface.fieldsCount(self._result) == 0:
self._affected_rows += CTaosInterface.affectedRows(
self._result)
return CTaosInterface.affectedRows(self._result)
else:
self._fields = CTaosInterface.useResult(
self._result)
return self._handle_result()
else:
raise ProgrammingError(
CTaosInterface.errStr(
self._result), errno)
def executemany(self, operation, seq_of_parameters):
"""Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters.
"""
pass
def fetchone(self):
"""Fetch the next row of a query result set, returning a single sequence, or None when no more data is available.
"""
pass
def fetchmany(self):
pass
def istype(self, col, dataType):
if (dataType.upper() == "BOOL"):
if (self._description[col][1] == FieldType.C_BOOL):
return True
if (dataType.upper() == "TINYINT"):
if (self._description[col][1] == FieldType.C_TINYINT):
return True
if (dataType.upper() == "TINYINT UNSIGNED"):
if (self._description[col][1] == FieldType.C_TINYINT_UNSIGNED):
return True
if (dataType.upper() == "SMALLINT"):
if (self._description[col][1] == FieldType.C_SMALLINT):
return True
if (dataType.upper() == "SMALLINT UNSIGNED"):
if (self._description[col][1] == FieldType.C_SMALLINT_UNSIGNED):
return True
if (dataType.upper() == "INT"):
if (self._description[col][1] == FieldType.C_INT):
return True
if (dataType.upper() == "INT UNSIGNED"):
if (self._description[col][1] == FieldType.C_INT_UNSIGNED):
return True
if (dataType.upper() == "BIGINT"):
if (self._description[col][1] == FieldType.C_BIGINT):
return True
if (dataType.upper() == "BIGINT UNSIGNED"):
if (self._description[col][1] == FieldType.C_BIGINT_UNSIGNED):
return True
if (dataType.upper() == "FLOAT"):
if (self._description[col][1] == FieldType.C_FLOAT):
return True
if (dataType.upper() == "DOUBLE"):
if (self._description[col][1] == FieldType.C_DOUBLE):
return True
if (dataType.upper() == "BINARY"):
if (self._description[col][1] == FieldType.C_BINARY):
return True
if (dataType.upper() == "TIMESTAMP"):
if (self._description[col][1] == FieldType.C_TIMESTAMP):
return True
if (dataType.upper() == "NCHAR"):
if (self._description[col][1] == FieldType.C_NCHAR):
return True
return False
def fetchall_row(self):
"""Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation.
"""
if self._result is None or self._fields is None:
raise OperationalError("Invalid use of fetchall")
buffer = [[] for i in range(len(self._fields))]
self._rowcount = 0
while True:
block, num_of_fields = CTaosInterface.fetchRow(
self._result, self._fields)
errno = CTaosInterface.libtaos.taos_errno(self._result)
if errno != 0:
raise ProgrammingError(
CTaosInterface.errStr(
self._result), errno)
if num_of_fields == 0:
break
self._rowcount += num_of_fields
for i in range(len(self._fields)):
buffer[i].extend(block[i])
return list(map(tuple, zip(*buffer)))
def fetchall(self):
if self._result is None or self._fields is None:
raise OperationalError("Invalid use of fetchall")
buffer = [[] for i in range(len(self._fields))]
self._rowcount = 0
while True:
block, num_of_fields = CTaosInterface.fetchBlock(
self._result, self._fields)
errno = CTaosInterface.libtaos.taos_errno(self._result)
if errno != 0:
raise ProgrammingError(
CTaosInterface.errStr(
self._result), errno)
if num_of_fields == 0:
break
self._rowcount += num_of_fields
for i in range(len(self._fields)):
buffer[i].extend(block[i])
return list(map(tuple, zip(*buffer)))
def nextset(self):
"""
"""
pass
def setinputsizes(self, sizes):
pass
def setoutputsize(self, size, column=None):
pass
def _reset_result(self):
"""Reset the result to unused version.
"""
self._description = []
self._rowcount = -1
if self._result is not None:
CTaosInterface.freeResult(self._result)
self._result = None
self._fields = None
self._block = None
self._block_rows = -1
self._block_iter = 0
self._affected_rows = 0
def _handle_result(self):
"""Handle the return result from query.
"""
self._description = []
for ele in self._fields:
self._description.append(
(ele['name'], ele['type'], None, None, None, None, False))
return self._result
../
\ No newline at end of file
Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
This program is free software: you can use, redistribute, and/or modify
it under the terms of the GNU Affero General Public License, version 3
or later ("AGPL"), as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
# TDengine python client interface
\ No newline at end of file
from .connection import TDengineConnection
from .cursor import TDengineCursor
# Globals
threadsafety = 0
paramstyle = 'pyformat'
__all__ = ['connection', 'cursor']
def connect(*args, **kwargs):
""" Function to return a TDengine connector object
Currently supported keyword parameters:
@dsn: Data source name as string
@user: Username as string(optional)
@password: Password as string(optional)
@host: Hostname(optional)
@database: Database name(optional)
@rtype: TDengineConnector
"""
return TDengineConnection(*args, **kwargs)
from .cursor import TDengineCursor
from .subscription import TDengineSubscription
from .cinterface import CTaosInterface
class TDengineConnection(object):
""" TDengine connection object
"""
def __init__(self, *args, **kwargs):
self._conn = None
self._host = None
self._user = "root"
self._password = "taosdata"
self._database = None
self._port = 0
self._config = None
self._chandle = None
self.config(**kwargs)
def config(self, **kwargs):
# host
if 'host' in kwargs:
self._host = kwargs['host']
# user
if 'user' in kwargs:
self._user = kwargs['user']
# password
if 'password' in kwargs:
self._password = kwargs['password']
# database
if 'database' in kwargs:
self._database = kwargs['database']
# port
if 'port' in kwargs:
self._port = kwargs['port']
# config
if 'config' in kwargs:
self._config = kwargs['config']
self._chandle = CTaosInterface(self._config)
self._conn = self._chandle.connect(
self._host,
self._user,
self._password,
self._database,
self._port)
def close(self):
"""Close current connection.
"""
return CTaosInterface.close(self._conn)
def subscribe(self, restart, topic, sql, interval):
"""Create a subscription.
"""
if self._conn is None:
return None
sub = CTaosInterface.subscribe(
self._conn, restart, topic, sql, interval)
return TDengineSubscription(sub)
def cursor(self):
"""Return a new Cursor object using the connection.
"""
return TDengineCursor(self)
def commit(self):
"""Commit any pending transaction to the database.
Since TDengine does not support transactions, this is a no-op.
"""
pass
def rollback(self):
"""Void functionality
"""
pass
def clear_result_set(self):
"""Clear unused result set on this connection.
"""
pass
if __name__ == "__main__":
conn = TDengineConnection(host='192.168.1.107')
conn.close()
print("Hello world")
"""Constants in TDengine python
"""
from .dbapi import *
class FieldType(object):
"""TDengine Field Types
"""
# type_code
C_NULL = 0
C_BOOL = 1
C_TINYINT = 2
C_SMALLINT = 3
C_INT = 4
C_BIGINT = 5
C_FLOAT = 6
C_DOUBLE = 7
C_BINARY = 8
C_TIMESTAMP = 9
C_NCHAR = 10
C_TINYINT_UNSIGNED = 11
C_SMALLINT_UNSIGNED = 12
C_INT_UNSIGNED = 13
C_BIGINT_UNSIGNED = 14
# NULL value definition
# NOTE: These values must match the C definitions in tsdb.h
C_BOOL_NULL = 0x02
C_TINYINT_NULL = -128
C_TINYINT_UNSIGNED_NULL = 255
C_SMALLINT_NULL = -32768
C_SMALLINT_UNSIGNED_NULL = 65535
C_INT_NULL = -2147483648
C_INT_UNSIGNED_NULL = 4294967295
C_BIGINT_NULL = -9223372036854775808
C_BIGINT_UNSIGNED_NULL = 18446744073709551615
C_FLOAT_NULL = float('nan')
C_DOUBLE_NULL = float('nan')
C_BINARY_NULL = bytearray([int('0xff', 16)])
# Timestamp precision definition
C_TIMESTAMP_MILLI = 0
C_TIMESTAMP_MICRO = 1
from .cinterface import CTaosInterface
from .error import *
from .constants import FieldType
# querySeqNum = 0
class TDengineCursor(object):
"""Database cursor which is used to manage the context of a fetch operation.
Attributes:
.description: Read-only attribute consists of 7-item sequences:
> name (mandatory)
> type_code (mandatory)
> display_size
> internal_size
> precision
> scale
> null_ok
This attribute will be None for operations that do not return rows or
if the cursor has not had an operation invoked via the .execute*() method yet.
.rowcount: This read-only attribute specifies the number of rows that the last
.execute*() produced (for DQL statements like SELECT) or affected
(for DML statements like UPDATE or INSERT).
"""
def __init__(self, connection=None):
self._description = []
self._rowcount = -1
self._connection = None
self._result = None
self._fields = None
self._block = None
self._block_rows = -1
self._block_iter = 0
self._affected_rows = 0
self._logfile = ""
if connection is not None:
self._connection = connection
def __iter__(self):
return self
def __next__(self):
if self._result is None or self._fields is None:
raise OperationalError("Invalid use of fetch iterator")
if self._block_rows <= self._block_iter:
block, self._block_rows = CTaosInterface.fetchRow(
self._result, self._fields)
if self._block_rows == 0:
raise StopIteration
self._block = list(map(tuple, zip(*block)))
self._block_iter = 0
data = self._block[self._block_iter]
self._block_iter += 1
return data
@property
def description(self):
"""Return the description of the object.
"""
return self._description
@property
def rowcount(self):
"""Return the rowcount of the object
"""
return self._rowcount
@property
def affected_rows(self):
"""Return the rowcount of insertion
"""
return self._affected_rows
def callproc(self, procname, *args):
"""Call a stored database procedure with the given name.
Void functionality since no stored procedures.
"""
pass
def log(self, logfile):
self._logfile = logfile
def close(self):
"""Close the cursor.
"""
if self._connection is None:
return False
self._reset_result()
self._connection = None
return True
def execute(self, operation, params=None):
"""Prepare and execute a database operation (query or command).
"""
if not operation:
return None
if not self._connection:
# TODO : change the exception raised here
raise ProgrammingError("Cursor is not connected")
self._reset_result()
stmt = operation
if params is not None:
pass
# global querySeqNum
# querySeqNum += 1
# localSeqNum = querySeqNum # avoid race condition
# print(" >> Exec Query ({}): {}".format(localSeqNum, str(stmt)))
self._result = CTaosInterface.query(self._connection._conn, stmt)
# print(" << Query ({}) Exec Done".format(localSeqNum))
if (self._logfile):
with open(self._logfile, "a") as logfile:
logfile.write("%s;\n" % operation)
errno = CTaosInterface.libtaos.taos_errno(self._result)
if errno == 0:
if CTaosInterface.fieldsCount(self._result) == 0:
self._affected_rows += CTaosInterface.affectedRows(
self._result)
return CTaosInterface.affectedRows(self._result)
else:
self._fields = CTaosInterface.useResult(
self._result)
return self._handle_result()
else:
raise ProgrammingError(
CTaosInterface.errStr(
self._result), errno)
def executemany(self, operation, seq_of_parameters):
"""Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters.
"""
pass
def fetchone(self):
"""Fetch the next row of a query result set, returning a single sequence, or None when no more data is available.
"""
pass
def fetchmany(self):
pass
def istype(self, col, dataType):
if (dataType.upper() == "BOOL"):
if (self._description[col][1] == FieldType.C_BOOL):
return True
if (dataType.upper() == "TINYINT"):
if (self._description[col][1] == FieldType.C_TINYINT):
return True
if (dataType.upper() == "TINYINT UNSIGNED"):
if (self._description[col][1] == FieldType.C_TINYINT_UNSIGNED):
return True
if (dataType.upper() == "SMALLINT"):
if (self._description[col][1] == FieldType.C_SMALLINT):
return True
if (dataType.upper() == "SMALLINT UNSIGNED"):
if (self._description[col][1] == FieldType.C_SMALLINT_UNSIGNED):
return True
if (dataType.upper() == "INT"):
if (self._description[col][1] == FieldType.C_INT):
return True
if (dataType.upper() == "INT UNSIGNED"):
if (self._description[col][1] == FieldType.C_INT_UNSIGNED):
return True
if (dataType.upper() == "BIGINT"):
if (self._description[col][1] == FieldType.C_BIGINT):
return True
if (dataType.upper() == "BIGINT UNSIGNED"):
if (self._description[col][1] == FieldType.C_BIGINT_UNSIGNED):
return True
if (dataType.upper() == "FLOAT"):
if (self._description[col][1] == FieldType.C_FLOAT):
return True
if (dataType.upper() == "DOUBLE"):
if (self._description[col][1] == FieldType.C_DOUBLE):
return True
if (dataType.upper() == "BINARY"):
if (self._description[col][1] == FieldType.C_BINARY):
return True
if (dataType.upper() == "TIMESTAMP"):
if (self._description[col][1] == FieldType.C_TIMESTAMP):
return True
if (dataType.upper() == "NCHAR"):
if (self._description[col][1] == FieldType.C_NCHAR):
return True
return False
def fetchall_row(self):
"""Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation.
"""
if self._result is None or self._fields is None:
raise OperationalError("Invalid use of fetchall")
buffer = [[] for i in range(len(self._fields))]
self._rowcount = 0
while True:
block, num_of_fields = CTaosInterface.fetchRow(
self._result, self._fields)
errno = CTaosInterface.libtaos.taos_errno(self._result)
if errno != 0:
raise ProgrammingError(
CTaosInterface.errStr(
self._result), errno)
if num_of_fields == 0:
break
self._rowcount += num_of_fields
for i in range(len(self._fields)):
buffer[i].extend(block[i])
return list(map(tuple, zip(*buffer)))
def fetchall(self):
if self._result is None or self._fields is None:
raise OperationalError("Invalid use of fetchall")
buffer = [[] for i in range(len(self._fields))]
self._rowcount = 0
while True:
block, num_of_fields = CTaosInterface.fetchBlock(
self._result, self._fields)
errno = CTaosInterface.libtaos.taos_errno(self._result)
if errno != 0:
raise ProgrammingError(
CTaosInterface.errStr(
self._result), errno)
if num_of_fields == 0:
break
self._rowcount += num_of_fields
for i in range(len(self._fields)):
buffer[i].extend(block[i])
return list(map(tuple, zip(*buffer)))
def nextset(self):
"""
"""
pass
def setinputsizes(self, sizes):
pass
def setoutputsize(self, size, column=None):
pass
def _reset_result(self):
"""Reset the result to unused version.
"""
self._description = []
self._rowcount = -1
if self._result is not None:
CTaosInterface.freeResult(self._result)
self._result = None
self._fields = None
self._block = None
self._block_rows = -1
self._block_iter = 0
self._affected_rows = 0
def _handle_result(self):
"""Handle the return result from query.
"""
self._description = []
for ele in self._fields:
self._description.append(
(ele['name'], ele['type'], None, None, None, None, False))
return self._result
"""Type Objects and Constructors.
"""
import time
import datetime
class DBAPITypeObject(object):
def __init__(self, *values):
self.values = values
def __cmp__(self, other):
if other in self.values:
return 0
if other < self.values:
return 1
else:
return -1
Date = datetime.date
Time = datetime.time
Timestamp = datetime.datetime
def DateFromTicks(ticks):
return Date(*time.localtime(ticks)[:3])
def TimeFromTicks(ticks):
return Time(*time.localtime(ticks)[3:6])
def TimestampFromTicks(ticks):
return Timestamp(*time.localtime(ticks)[:6])
Binary = bytes
# STRING = DBAPITypeObject(*constants.FieldType.get_string_types())
# BINARY = DBAPITypeObject(*constants.FieldType.get_binary_types())
# NUMBER = BAPITypeObject(*constants.FieldType.get_number_types())
# DATETIME = DBAPITypeObject(*constants.FieldType.get_timestamp_types())
# ROWID = DBAPITypeObject()
"""Python exceptions
"""
class Error(Exception):
def __init__(self, msg=None, errno=None):
self.msg = msg
self._full_msg = self.msg
self.errno = errno
def __str__(self):
return self._full_msg
class Warning(Exception):
"""Exception raised for important warnings like data truncations while inserting.
"""
pass
class InterfaceError(Error):
"""Exception raised for errors that are related to the database interface rather than the database itself.
"""
pass
class DatabaseError(Error):
"""Exception raised for errors that are related to the database.
"""
pass
class DataError(DatabaseError):
"""Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range.
"""
pass
class OperationalError(DatabaseError):
"""Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer
"""
pass
class IntegrityError(DatabaseError):
"""Exception raised when the relational integrity of the database is affected.
"""
pass
class InternalError(DatabaseError):
"""Exception raised when the database encounters an internal error.
"""
pass
class ProgrammingError(DatabaseError):
"""Exception raised for programming errors.
"""
pass
class NotSupportedError(DatabaseError):
"""Exception raised in case a method or database API was used which is not supported by the database,.
"""
pass
from .cinterface import CTaosInterface
from .error import *
class TDengineSubscription(object):
"""TDengine subscription object
"""
def __init__(self, sub):
self._sub = sub
def consume(self):
"""Consume rows of a subscription
"""
if self._sub is None:
raise OperationalError("Invalid use of consume")
result, fields = CTaosInterface.consume(self._sub)
buffer = [[] for i in range(len(fields))]
while True:
block, num_of_fields = CTaosInterface.fetchBlock(result, fields)
if num_of_fields == 0:
break
for i in range(len(fields)):
buffer[i].extend(block[i])
self.fields = fields
return list(map(tuple, zip(*buffer)))
def close(self, keepProgress=True):
"""Close the Subscription.
"""
if self._sub is None:
return False
CTaosInterface.unsubscribe(self._sub, keepProgress)
return True
if __name__ == '__main__':
from .connection import TDengineConnection
conn = TDengineConnection(
host="127.0.0.1",
user="root",
password="taosdata",
database="test")
# Generate a cursor object to run SQL commands
sub = conn.subscribe(True, "test", "select * from meters;", 1000)
for i in range(0, 10):
data = sub.consume()
for d in data:
print(d)
sub.close()
conn.close()
../
\ No newline at end of file
Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
This program is free software: you can use, redistribute, and/or modify
it under the terms of the GNU Affero General Public License, version 3
or later ("AGPL"), as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
# TDengine python client interface
\ No newline at end of file
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="taos",
version="2.0.9",
author="Taosdata Inc.",
author_email="support@taosdata.com",
description="TDengine python client package",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/pypa/sampleproject",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: MacOS X",
],
)
from .connection import TDengineConnection
from .cursor import TDengineCursor
# Globals
threadsafety = 0
paramstyle = 'pyformat'
__all__ = ['connection', 'cursor']
def connect(*args, **kwargs):
""" Function to return a TDengine connector object
Currently supported keyword parameters:
@dsn: Data source name as string
@user: Username as string(optional)
@password: Password as string(optional)
@host: Hostname(optional)
@database: Database name(optional)
@rtype: TDengineConnector
"""
return TDengineConnection(*args, **kwargs)
../
\ No newline at end of file
# TDengine python client interface
\ No newline at end of file
../
\ No newline at end of file