Commit a2e6ebab authored by sangshuduo

Merge branch '2.6' into feat/sangshuduo/TD-14141-update-taostools-for2.6

...@@ -7,7 +7,9 @@ ...@@ -7,7 +7,9 @@
* See COPYRIGHT in top-level directory. * See COPYRIGHT in top-level directory.
*/ */
#ifndef WINDOWS #ifndef WINDOWS
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
#pragma GCC diagnostic push #pragma GCC diagnostic push
#endif
#pragma GCC diagnostic ignored "-Wchar-subscripts" #pragma GCC diagnostic ignored "-Wchar-subscripts"
#endif #endif
...@@ -233,5 +235,7 @@ INLINE void updateLossyCompElement_Float(unsigned char* diffBytes, unsigned char ...@@ -233,5 +235,7 @@ INLINE void updateLossyCompElement_Float(unsigned char* diffBytes, unsigned char
} }
#ifndef WINDOWS #ifndef WINDOWS
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
#pragma GCC diagnostic pop #pragma GCC diagnostic pop
#endif #endif
\ No newline at end of file #endif
...@@ -117,7 +117,7 @@ node qremove(HuffmanTree* huffmanTree) ...@@ -117,7 +117,7 @@ node qremove(HuffmanTree* huffmanTree)
/** /**
* @out1 should be set to 0. * @out1 should be set to 0.
* @out2 should be 0 as well. * @out2 should be 0 as well.
* @index: the index of the byte * @idx: the idx of the byte
* */ * */
void build_code(HuffmanTree *huffmanTree, node n, int len, unsigned long out1, unsigned long out2) void build_code(HuffmanTree *huffmanTree, node n, int len, unsigned long out1, unsigned long out2)
{ {
...@@ -136,8 +136,8 @@ void build_code(HuffmanTree *huffmanTree, node n, int len, unsigned long out1, u ...@@ -136,8 +136,8 @@ void build_code(HuffmanTree *huffmanTree, node n, int len, unsigned long out1, u
huffmanTree->cout[n->c] = (unsigned char)len; huffmanTree->cout[n->c] = (unsigned char)len;
return; return;
} }
int index = len >> 6; //=len/64 int idx = len >> 6; //=len/64
if(index == 0) if(idx == 0)
{ {
out1 = out1 << 1; out1 = out1 << 1;
out1 = out1 | 0; out1 = out1 | 0;
...@@ -164,13 +164,13 @@ void build_code(HuffmanTree *huffmanTree, node n, int len, unsigned long out1, u ...@@ -164,13 +164,13 @@ void build_code(HuffmanTree *huffmanTree, node n, int len, unsigned long out1, u
* */ * */
void init(HuffmanTree* huffmanTree, int *s, size_t length) void init(HuffmanTree* huffmanTree, int *s, size_t length)
{ {
size_t i, index; size_t i, idx;
size_t *freq = (size_t *)malloc(huffmanTree->allNodes*sizeof(size_t)); size_t *freq = (size_t *)malloc(huffmanTree->allNodes*sizeof(size_t));
memset(freq, 0, huffmanTree->allNodes*sizeof(size_t)); memset(freq, 0, huffmanTree->allNodes*sizeof(size_t));
for(i = 0;i < length;i++) for(i = 0;i < length;i++)
{ {
index = s[i]; idx = s[i];
freq[index]++; freq[idx]++;
} }
for (i = 0; i < huffmanTree->allNodes; i++) for (i = 0; i < huffmanTree->allNodes; i++)
......
...@@ -25,9 +25,9 @@ void new_TightDataPointStorageD_Empty(TightDataPointStorageD **this) ...@@ -25,9 +25,9 @@ void new_TightDataPointStorageD_Empty(TightDataPointStorageD **this)
int new_TightDataPointStorageD_fromFlatBytes(TightDataPointStorageD **this, unsigned char* flatBytes, size_t flatBytesLength, sz_exedata* pde_exe, sz_params* pde_params) int new_TightDataPointStorageD_fromFlatBytes(TightDataPointStorageD **this, unsigned char* flatBytes, size_t flatBytesLength, sz_exedata* pde_exe, sz_params* pde_params)
{ {
new_TightDataPointStorageD_Empty(this); new_TightDataPointStorageD_Empty(this);
size_t i, index = 0; size_t i, idx = 0;
unsigned char version = flatBytes[index++]; //3 unsigned char version = flatBytes[idx++]; //3
unsigned char sameRByte = flatBytes[index++]; //1 unsigned char sameRByte = flatBytes[idx++]; //1
// parse data format // parse data format
switch (version) switch (version)
...@@ -46,15 +46,15 @@ int new_TightDataPointStorageD_fromFlatBytes(TightDataPointStorageD **this, unsi ...@@ -46,15 +46,15 @@ int new_TightDataPointStorageD_fromFlatBytes(TightDataPointStorageD **this, unsi
pde_params->accelerate_pw_rel_compression = (sameRByte & 0x08) >> 3; pde_params->accelerate_pw_rel_compression = (sameRByte & 0x08) >> 3;
int errorBoundMode = SZ_ABS; int errorBoundMode = SZ_ABS;
convertBytesToSZParams(&(flatBytes[index]), pde_params, pde_exe); convertBytesToSZParams(&(flatBytes[idx]), pde_params, pde_exe);
index += MetaDataByteLength_double; idx += MetaDataByteLength_double;
int isRegression = (sameRByte >> 7) & 0x01; int isRegression = (sameRByte >> 7) & 0x01;
unsigned char dsLengthBytes[8]; unsigned char dsLengthBytes[8];
for (i = 0; i < pde_exe->SZ_SIZE_TYPE; i++) for (i = 0; i < pde_exe->SZ_SIZE_TYPE; i++)
dsLengthBytes[i] = flatBytes[index++]; dsLengthBytes[i] = flatBytes[idx++];
(*this)->dataSeriesLength = bytesToSize(dsLengthBytes, pde_exe->SZ_SIZE_TYPE); (*this)->dataSeriesLength = bytesToSize(dsLengthBytes, pde_exe->SZ_SIZE_TYPE);
if((*this)->isLossless==1) if((*this)->isLossless==1)
...@@ -65,7 +65,7 @@ int new_TightDataPointStorageD_fromFlatBytes(TightDataPointStorageD **this, unsi ...@@ -65,7 +65,7 @@ int new_TightDataPointStorageD_fromFlatBytes(TightDataPointStorageD **this, unsi
else if(same==1) else if(same==1)
{ {
(*this)->allSameData = 1; (*this)->allSameData = 1;
(*this)->exactMidBytes = &(flatBytes[index]); (*this)->exactMidBytes = &(flatBytes[idx]);
return errorBoundMode; return errorBoundMode;
} }
else else
...@@ -74,42 +74,42 @@ int new_TightDataPointStorageD_fromFlatBytes(TightDataPointStorageD **this, unsi ...@@ -74,42 +74,42 @@ int new_TightDataPointStorageD_fromFlatBytes(TightDataPointStorageD **this, unsi
if(isRegression == 1) if(isRegression == 1)
{ {
(*this)->raBytes_size = flatBytesLength - 3 - 1 - MetaDataByteLength_double - pde_exe->SZ_SIZE_TYPE; (*this)->raBytes_size = flatBytesLength - 3 - 1 - MetaDataByteLength_double - pde_exe->SZ_SIZE_TYPE;
(*this)->raBytes = &(flatBytes[index]); (*this)->raBytes = &(flatBytes[idx]);
return errorBoundMode; return errorBoundMode;
} }
unsigned char byteBuf[8]; unsigned char byteBuf[8];
for (i = 0; i < 4; i++) for (i = 0; i < 4; i++)
byteBuf[i] = flatBytes[index++]; byteBuf[i] = flatBytes[idx++];
int max_quant_intervals = bytesToInt_bigEndian(byteBuf);// 4 int max_quant_intervals = bytesToInt_bigEndian(byteBuf);// 4
pde_params->maxRangeRadius = max_quant_intervals/2; pde_params->maxRangeRadius = max_quant_intervals/2;
for (i = 0; i < 4; i++) for (i = 0; i < 4; i++)
byteBuf[i] = flatBytes[index++]; byteBuf[i] = flatBytes[idx++];
(*this)->intervals = bytesToInt_bigEndian(byteBuf);// 4 (*this)->intervals = bytesToInt_bigEndian(byteBuf);// 4
for (i = 0; i < 8; i++) for (i = 0; i < 8; i++)
byteBuf[i] = flatBytes[index++]; byteBuf[i] = flatBytes[idx++];
(*this)->medianValue = bytesToDouble(byteBuf);//8 (*this)->medianValue = bytesToDouble(byteBuf);//8
(*this)->reqLength = flatBytes[index++]; //1 (*this)->reqLength = flatBytes[idx++]; //1
for (i = 0; i < 8; i++) for (i = 0; i < 8; i++)
byteBuf[i] = flatBytes[index++]; byteBuf[i] = flatBytes[idx++];
(*this)->realPrecision = bytesToDouble(byteBuf);//8 (*this)->realPrecision = bytesToDouble(byteBuf);//8
for (i = 0; i < pde_exe->SZ_SIZE_TYPE; i++) for (i = 0; i < pde_exe->SZ_SIZE_TYPE; i++)
byteBuf[i] = flatBytes[index++]; byteBuf[i] = flatBytes[idx++];
(*this)->typeArray_size = bytesToSize(byteBuf, pde_exe->SZ_SIZE_TYPE); (*this)->typeArray_size = bytesToSize(byteBuf, pde_exe->SZ_SIZE_TYPE);
for (i = 0; i < pde_exe->SZ_SIZE_TYPE; i++) for (i = 0; i < pde_exe->SZ_SIZE_TYPE; i++)
byteBuf[i] = flatBytes[index++]; byteBuf[i] = flatBytes[idx++];
(*this)->exactDataNum = bytesToSize(byteBuf, pde_exe->SZ_SIZE_TYPE);// ST (*this)->exactDataNum = bytesToSize(byteBuf, pde_exe->SZ_SIZE_TYPE);// ST
for (i = 0; i < pde_exe->SZ_SIZE_TYPE; i++) for (i = 0; i < pde_exe->SZ_SIZE_TYPE; i++)
byteBuf[i] = flatBytes[index++]; byteBuf[i] = flatBytes[idx++];
(*this)->exactMidBytes_size = bytesToSize(byteBuf, pde_exe->SZ_SIZE_TYPE);// ST (*this)->exactMidBytes_size = bytesToSize(byteBuf, pde_exe->SZ_SIZE_TYPE);// ST
size_t logicLeadNumBitsNum = (*this)->exactDataNum * 2; size_t logicLeadNumBitsNum = (*this)->exactDataNum * 2;
...@@ -122,12 +122,12 @@ int new_TightDataPointStorageD_fromFlatBytes(TightDataPointStorageD **this, unsi ...@@ -122,12 +122,12 @@ int new_TightDataPointStorageD_fromFlatBytes(TightDataPointStorageD **this, unsi
(*this)->leadNumArray_size = (logicLeadNumBitsNum >> 3) + 1; (*this)->leadNumArray_size = (logicLeadNumBitsNum >> 3) + 1;
} }
(*this)->typeArray = &flatBytes[index]; (*this)->typeArray = &flatBytes[idx];
//retrieve the number of states (i.e., stateNum) //retrieve the number of states (i.e., stateNum)
(*this)->allNodes = bytesToInt_bigEndian((*this)->typeArray); //the first 4 bytes store the stateNum (*this)->allNodes = bytesToInt_bigEndian((*this)->typeArray); //the first 4 bytes store the stateNum
(*this)->stateNum = ((*this)->allNodes+1)/2; (*this)->stateNum = ((*this)->allNodes+1)/2;
index+=(*this)->typeArray_size; idx+=(*this)->typeArray_size;
// todo need check length // todo need check length
...@@ -135,15 +135,15 @@ int new_TightDataPointStorageD_fromFlatBytes(TightDataPointStorageD **this, unsi ...@@ -135,15 +135,15 @@ int new_TightDataPointStorageD_fromFlatBytes(TightDataPointStorageD **this, unsi
- pde_exe->SZ_SIZE_TYPE - pde_exe->SZ_SIZE_TYPE - pde_exe->SZ_SIZE_TYPE - pde_exe->SZ_SIZE_TYPE - pde_exe->SZ_SIZE_TYPE - pde_exe->SZ_SIZE_TYPE
- (*this)->leadNumArray_size - (*this)->exactMidBytes_size - (*this)->typeArray_size; - (*this)->leadNumArray_size - (*this)->exactMidBytes_size - (*this)->typeArray_size;
(*this)->leadNumArray = &flatBytes[index]; (*this)->leadNumArray = &flatBytes[idx];
index+=(*this)->leadNumArray_size; idx+=(*this)->leadNumArray_size;
(*this)->exactMidBytes = &flatBytes[index]; (*this)->exactMidBytes = &flatBytes[idx];
index+=(*this)->exactMidBytes_size; idx+=(*this)->exactMidBytes_size;
(*this)->residualMidBits = &flatBytes[index]; (*this)->residualMidBits = &flatBytes[idx];
return errorBoundMode; return errorBoundMode;
......
...@@ -25,15 +25,15 @@ void new_TightDataPointStorageF_Empty(TightDataPointStorageF **this) ...@@ -25,15 +25,15 @@ void new_TightDataPointStorageF_Empty(TightDataPointStorageF **this)
int new_TightDataPointStorageF_fromFlatBytes(TightDataPointStorageF **this, unsigned char* flatBytes, size_t flatBytesLength, sz_exedata* pde_exe, sz_params* pde_params) int new_TightDataPointStorageF_fromFlatBytes(TightDataPointStorageF **this, unsigned char* flatBytes, size_t flatBytesLength, sz_exedata* pde_exe, sz_params* pde_params)
{ {
new_TightDataPointStorageF_Empty(this); new_TightDataPointStorageF_Empty(this);
size_t i, index = 0; size_t i, idx = 0;
// //
// parse tdps // parse tdps
// //
// 1 version(1) // 1 version(1)
unsigned char version = flatBytes[index++]; //1 unsigned char version = flatBytes[idx++]; //1
unsigned char sameRByte = flatBytes[index++]; //1 unsigned char sameRByte = flatBytes[idx++]; //1
// parse data format // parse data format
switch (version) switch (version)
...@@ -51,12 +51,12 @@ int new_TightDataPointStorageF_fromFlatBytes(TightDataPointStorageF **this, unsi ...@@ -51,12 +51,12 @@ int new_TightDataPointStorageF_fromFlatBytes(TightDataPointStorageF **this, unsi
pde_exe->SZ_SIZE_TYPE = ((sameRByte & 0x40)>>6)==1?8:4; //0100,0000 pde_exe->SZ_SIZE_TYPE = ((sameRByte & 0x40)>>6)==1?8:4; //0100,0000
int errorBoundMode = SZ_ABS; int errorBoundMode = SZ_ABS;
// 3 meta(2) // 3 meta(2)
convertBytesToSZParams(&(flatBytes[index]), pde_params, pde_exe); convertBytesToSZParams(&(flatBytes[idx]), pde_params, pde_exe);
index += MetaDataByteLength; idx += MetaDataByteLength;
// 4 element count(4) // 4 element count(4)
unsigned char dsLengthBytes[8]; unsigned char dsLengthBytes[8];
for (i = 0; i < pde_exe->SZ_SIZE_TYPE; i++) for (i = 0; i < pde_exe->SZ_SIZE_TYPE; i++)
dsLengthBytes[i] = flatBytes[index++]; dsLengthBytes[i] = flatBytes[idx++];
(*this)->dataSeriesLength = bytesToSize(dsLengthBytes, pde_exe->SZ_SIZE_TYPE);// 4 or 8 (*this)->dataSeriesLength = bytesToSize(dsLengthBytes, pde_exe->SZ_SIZE_TYPE);// 4 or 8
if((*this)->isLossless==1) if((*this)->isLossless==1)
{ {
...@@ -66,7 +66,7 @@ int new_TightDataPointStorageF_fromFlatBytes(TightDataPointStorageF **this, unsi ...@@ -66,7 +66,7 @@ int new_TightDataPointStorageF_fromFlatBytes(TightDataPointStorageF **this, unsi
else if(same==1) else if(same==1)
{ {
(*this)->allSameData = 1; (*this)->allSameData = 1;
(*this)->exactMidBytes = &(flatBytes[index]); (*this)->exactMidBytes = &(flatBytes[idx]);
return errorBoundMode; return errorBoundMode;
} }
else else
...@@ -76,40 +76,40 @@ int new_TightDataPointStorageF_fromFlatBytes(TightDataPointStorageF **this, unsi ...@@ -76,40 +76,40 @@ int new_TightDataPointStorageF_fromFlatBytes(TightDataPointStorageF **this, unsi
if(isRegression == 1) if(isRegression == 1)
{ {
(*this)->raBytes_size = flatBytesLength - 1 - 1 - MetaDataByteLength - pde_exe->SZ_SIZE_TYPE; (*this)->raBytes_size = flatBytesLength - 1 - 1 - MetaDataByteLength - pde_exe->SZ_SIZE_TYPE;
(*this)->raBytes = &(flatBytes[index]); (*this)->raBytes = &(flatBytes[idx]);
return errorBoundMode; return errorBoundMode;
} }
// 5 quant intervals(4) // 5 quant intervals(4)
unsigned char byteBuf[8]; unsigned char byteBuf[8];
for (i = 0; i < 4; i++) for (i = 0; i < 4; i++)
byteBuf[i] = flatBytes[index++]; byteBuf[i] = flatBytes[idx++];
int max_quant_intervals = bytesToInt_bigEndian(byteBuf);// 4 int max_quant_intervals = bytesToInt_bigEndian(byteBuf);// 4
pde_params->maxRangeRadius = max_quant_intervals/2; pde_params->maxRangeRadius = max_quant_intervals/2;
// 6 intervals // 6 intervals
for (i = 0; i < 4; i++) for (i = 0; i < 4; i++)
byteBuf[i] = flatBytes[index++]; byteBuf[i] = flatBytes[idx++];
(*this)->intervals = bytesToInt_bigEndian(byteBuf);// 4 (*this)->intervals = bytesToInt_bigEndian(byteBuf);// 4
// 7 median // 7 median
for (i = 0; i < 4; i++) for (i = 0; i < 4; i++)
byteBuf[i] = flatBytes[index++]; byteBuf[i] = flatBytes[idx++];
(*this)->medianValue = bytesToFloat(byteBuf); //4 (*this)->medianValue = bytesToFloat(byteBuf); //4
// 8 reqLength // 8 reqLength
(*this)->reqLength = flatBytes[index++]; //1 (*this)->reqLength = flatBytes[idx++]; //1
// 9 realPrecision(8) // 9 realPrecision(8)
for (i = 0; i < 8; i++) for (i = 0; i < 8; i++)
byteBuf[i] = flatBytes[index++]; byteBuf[i] = flatBytes[idx++];
(*this)->realPrecision = bytesToDouble(byteBuf);//8 (*this)->realPrecision = bytesToDouble(byteBuf);//8
// 10 typeArray_size // 10 typeArray_size
for (i = 0; i < pde_exe->SZ_SIZE_TYPE; i++) for (i = 0; i < pde_exe->SZ_SIZE_TYPE; i++)
byteBuf[i] = flatBytes[index++]; byteBuf[i] = flatBytes[idx++];
(*this)->typeArray_size = bytesToSize(byteBuf, pde_exe->SZ_SIZE_TYPE);// 4 (*this)->typeArray_size = bytesToSize(byteBuf, pde_exe->SZ_SIZE_TYPE);// 4
// 11 exactNum // 11 exactNum
for (i = 0; i < pde_exe->SZ_SIZE_TYPE; i++) for (i = 0; i < pde_exe->SZ_SIZE_TYPE; i++)
byteBuf[i] = flatBytes[index++]; byteBuf[i] = flatBytes[idx++];
(*this)->exactDataNum = bytesToSize(byteBuf, pde_exe->SZ_SIZE_TYPE);// ST (*this)->exactDataNum = bytesToSize(byteBuf, pde_exe->SZ_SIZE_TYPE);// ST
// 12 mid size // 12 mid size
for (i = 0; i < pde_exe->SZ_SIZE_TYPE; i++) for (i = 0; i < pde_exe->SZ_SIZE_TYPE; i++)
byteBuf[i] = flatBytes[index++]; byteBuf[i] = flatBytes[idx++];
(*this)->exactMidBytes_size = bytesToSize(byteBuf, pde_exe->SZ_SIZE_TYPE);// STqq (*this)->exactMidBytes_size = bytesToSize(byteBuf, pde_exe->SZ_SIZE_TYPE);// STqq
// calc leadNumArray_size // calc leadNumArray_size
...@@ -124,20 +124,20 @@ int new_TightDataPointStorageF_fromFlatBytes(TightDataPointStorageF **this, unsi ...@@ -124,20 +124,20 @@ int new_TightDataPointStorageF_fromFlatBytes(TightDataPointStorageF **this, unsi
} }
// 13 typeArray // 13 typeArray
(*this)->typeArray = &flatBytes[index]; (*this)->typeArray = &flatBytes[idx];
//retrieve the number of states (i.e., stateNum) //retrieve the number of states (i.e., stateNum)
(*this)->allNodes = bytesToInt_bigEndian((*this)->typeArray); //the first 4 bytes store the stateNum (*this)->allNodes = bytesToInt_bigEndian((*this)->typeArray); //the first 4 bytes store the stateNum
(*this)->stateNum = ((*this)->allNodes+1)/2; (*this)->stateNum = ((*this)->allNodes+1)/2;
index+=(*this)->typeArray_size; idx+=(*this)->typeArray_size;
// 14 leadNumArray // 14 leadNumArray
(*this)->leadNumArray = &flatBytes[index]; (*this)->leadNumArray = &flatBytes[idx];
index += (*this)->leadNumArray_size; idx += (*this)->leadNumArray_size;
// 15 exactMidBytes // 15 exactMidBytes
(*this)->exactMidBytes = &flatBytes[index]; (*this)->exactMidBytes = &flatBytes[idx];
index+=(*this)->exactMidBytes_size; idx+=(*this)->exactMidBytes_size;
// 16 residualMidBits // 16 residualMidBits
(*this)->residualMidBits = &flatBytes[index]; (*this)->residualMidBits = &flatBytes[idx];
// calc residualMidBits_size // calc residualMidBits_size
(*this)->residualMidBits_size = flatBytesLength - 1 - 1 - MetaDataByteLength - pde_exe->SZ_SIZE_TYPE - 4 - 4 - 4 - 1 - 8 (*this)->residualMidBits_size = flatBytesLength - 1 - 1 - MetaDataByteLength - pde_exe->SZ_SIZE_TYPE - 4 - 4 - 4 - 1 - 8
......
...@@ -1683,7 +1683,7 @@ CJSON_PUBLIC(int) cJSON_GetArraySize(const cJSON *array) ...@@ -1683,7 +1683,7 @@ CJSON_PUBLIC(int) cJSON_GetArraySize(const cJSON *array)
return (int)size; return (int)size;
} }
static cJSON* get_array_item(const cJSON *array, size_t index) static cJSON* get_array_item(const cJSON *array, size_t idx)
{ {
cJSON *current_child = NULL; cJSON *current_child = NULL;
...@@ -1693,23 +1693,23 @@ static cJSON* get_array_item(const cJSON *array, size_t index) ...@@ -1693,23 +1693,23 @@ static cJSON* get_array_item(const cJSON *array, size_t index)
} }
current_child = array->child; current_child = array->child;
while ((current_child != NULL) && (index > 0)) while ((current_child != NULL) && (idx > 0))
{ {
index--; idx--;
current_child = current_child->next; current_child = current_child->next;
} }
return current_child; return current_child;
} }
CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int index) CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int idx)
{ {
if (index < 0) if (idx < 0)
{ {
return NULL; return NULL;
} }
return get_array_item(array, (size_t)index); return get_array_item(array, (size_t)idx);
} }
static cJSON *get_object_item(const cJSON * const object, const char * const name, const cJSON_bool case_sensitive) static cJSON *get_object_item(const cJSON * const object, const char * const name, const cJSON_bool case_sensitive)
......
...@@ -98,11 +98,7 @@ pip install git+https://github.com/taosdata/taos-connector-python.git ...@@ -98,11 +98,7 @@ pip install git+https://github.com/taosdata/taos-connector-python.git
Just need to add `driver-go` dependency in `go.mod` . Just need to add `driver-go` dependency in `go.mod` .
```go-mod title=go.mod ```go-mod title=go.mod
module goexample {{#include docs/examples/go/go.mod}}
go 1.17
require github.com/taosdata/driver-go/v2 develop
``` ```
:::note :::note
......
...@@ -32,7 +32,6 @@ CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [DAYS days] [UPDATE 1]; ...@@ -32,7 +32,6 @@ CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [DAYS days] [UPDATE 1];
- cacheLast: [Description](/reference/config/#cachelast) - cacheLast: [Description](/reference/config/#cachelast)
- replica: [Description](/reference/config/#replica) - replica: [Description](/reference/config/#replica)
- quorum: [Description](/reference/config/#quorum) - quorum: [Description](/reference/config/#quorum)
- maxVgroupsPerDb: [Description](/reference/config/#maxvgroupsperdb)
- comp: [Description](/reference/config/#comp) - comp: [Description](/reference/config/#comp)
- precision: [Description](/reference/config/#precision) - precision: [Description](/reference/config/#precision)
6. Please note that all of the parameters mentioned in this section are configured in configuration file `taos.cfg` on the TDengine server. If not specified in the `create database` statement, the values from taos.cfg are used by default. To override default parameters, they must be specified in the `create database` statement. 6. Please note that all of the parameters mentioned in this section are configured in configuration file `taos.cfg` on the TDengine server. If not specified in the `create database` statement, the values from taos.cfg are used by default. To override default parameters, they must be specified in the `create database` statement.
......
...@@ -5,15 +5,15 @@ sidebar_label: Go ...@@ -5,15 +5,15 @@ sidebar_label: Go
title: TDengine Go Connector title: TDengine Go Connector
--- ---
import Tabs from '@theme/Tabs'; import Tabs from "@theme/Tabs";
import TabItem from '@theme/TabItem'; import TabItem from "@theme/TabItem";
import Preparation from "./_preparation.mdx" import Preparation from "./_preparation.mdx";
import GoInsert from "../../07-develop/03-insert-data/_go_sql.mdx" import GoInsert from "../../07-develop/03-insert-data/_go_sql.mdx";
import GoInfluxLine from "../../07-develop/03-insert-data/_go_line.mdx" import GoInfluxLine from "../../07-develop/03-insert-data/_go_line.mdx";
import GoOpenTSDBTelnet from "../../07-develop/03-insert-data/_go_opts_telnet.mdx" import GoOpenTSDBTelnet from "../../07-develop/03-insert-data/_go_opts_telnet.mdx";
import GoOpenTSDBJson from "../../07-develop/03-insert-data/_go_opts_json.mdx" import GoOpenTSDBJson from "../../07-develop/03-insert-data/_go_opts_json.mdx";
import GoQuery from "../../07-develop/04-query-data/_go.mdx" import GoQuery from "../../07-develop/04-query-data/_go.mdx";
`driver-go` is the official Go language connector for TDengine. It implements the [database/sql](https://golang.org/pkg/database/sql/) package, the generic Go language interface to SQL databases. Go developers can use it to develop applications that access TDengine cluster data. `driver-go` is the official Go language connector for TDengine. It implements the [database/sql](https://golang.org/pkg/database/sql/) package, the generic Go language interface to SQL databases. Go developers can use it to develop applications that access TDengine cluster data.
...@@ -38,18 +38,18 @@ Please refer to [version support list](/reference/connector#version-support) ...@@ -38,18 +38,18 @@ Please refer to [version support list](/reference/connector#version-support)
A "native connection" is established by the connector directly to the TDengine instance via the TDengine client driver (taosc). The supported functional features are: A "native connection" is established by the connector directly to the TDengine instance via the TDengine client driver (taosc). The supported functional features are:
* Normal queries - Normal queries
* Continuous queries - Continuous queries
* Subscriptions - Subscriptions
* schemaless interface - schemaless interface
* parameter binding interface - parameter binding interface
### REST connection ### REST connection
A "REST connection" is a connection between the application and the TDengine instance via the REST API provided by the taosAdapter component. The following features are supported: A "REST connection" is a connection between the application and the TDengine instance via the REST API provided by the taosAdapter component. The following features are supported:
* General queries - General queries
* Continuous queries - Continuous queries
## Installation steps ## Installation steps
...@@ -60,44 +60,44 @@ A "REST connection" is a connection between the application and the TDengine ins ...@@ -60,44 +60,44 @@ A "REST connection" is a connection between the application and the TDengine ins
Configure the environment variables and check the command. Configure the environment variables and check the command.
* `go env` - `go env`
* `gcc -v` - `gcc -v`
### Use go get to install ### Use go get to install
``` ```
go get -u github.com/taosdata/driver-go/v2@develop go get -u github.com/taosdata/driver-go/v2@latest
``` ```
### Manage with go mod ### Manage with go mod
1. Initialize the project with the `go mod` command. 1. Initialize the project with the `go mod` command.
```text ```text
go mod init taos-demo go mod init taos-demo
``` ```
2. Introduce taosSql 2. Introduce taosSql
```go ```go
import ( import (
"database/sql" "database/sql"
_ "github.com/taosdata/driver-go/v2/taosSql" _ "github.com/taosdata/driver-go/v2/taosSql"
) )
``` ```
3. Update the dependency packages with `go mod tidy`. 3. Update the dependency packages with `go mod tidy`.
```text ```text
go mod tidy go mod tidy
``` ```
4. Run the program with `go run taos-demo` or compile the binary with the `go build` command. 4. Run the program with `go run taos-demo` or compile the binary with the `go build` command.
```text ```text
go run taos-demo go run taos-demo
go build go build
``` ```
## Create a connection ## Create a connection
...@@ -105,7 +105,7 @@ go get -u github.com/taosdata/driver-go/v2@develop ...@@ -105,7 +105,7 @@ go get -u github.com/taosdata/driver-go/v2@develop
Data source names have a standard format, e.g. [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php), but no type prefix (square brackets indicate optionally): Data source names have a standard format, e.g. [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php), but no type prefix (square brackets indicate optionally):
``` text ```text
[username[:password]@][protocol[(address)]]/[dbname][?param1=value1&... &paramN=valueN] [username[:password]@][protocol[(address)]]/[dbname][?param1=value1&... &paramN=valueN]
``` ```
...@@ -124,7 +124,7 @@ _taosSql_ implements Go's `database/sql/driver` interface via cgo. You can use t ...@@ -124,7 +124,7 @@ _taosSql_ implements Go's `database/sql/driver` interface via cgo. You can use t
Use `taosSql` as `driverName` and use a correct [DSN](#DSN) as `dataSourceName`, DSN supports the following parameters. Use `taosSql` as `driverName` and use a correct [DSN](#DSN) as `dataSourceName`, DSN supports the following parameters.
* configPath specifies the `taos.cfg` directory - configPath specifies the `taos.cfg` directory
Example. Example.
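For illustration, a minimal sketch of opening a native connection with `taosSql`; the address, port, and credentials below are placeholder assumptions for a default local install, not values taken from this page.

```go
package main

import (
	"database/sql"
	"fmt"

	_ "github.com/taosdata/driver-go/v2/taosSql"
)

func main() {
	// DSN follows [username[:password]@][protocol[(address)]]/[dbname];
	// root/taosdata and localhost:6030 are assumed defaults.
	db, err := sql.Open("taosSql", "root:taosdata@tcp(localhost:6030)/")
	if err != nil {
		fmt.Println("failed to connect:", err)
		return
	}
	defer db.Close()
}
```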
...@@ -155,8 +155,8 @@ _taosRestful_ implements Go's `database/sql/driver` interface via `http client`. ...@@ -155,8 +155,8 @@ _taosRestful_ implements Go's `database/sql/driver` interface via `http client`.
Use `taosRestful` as `driverName` and use a correct [DSN](#DSN) as `dataSourceName` with the following parameters supported by the DSN. Use `taosRestful` as `driverName` and use a correct [DSN](#DSN) as `dataSourceName` with the following parameters supported by the DSN.
* `disableCompression` whether to accept compressed data, default is true do not accept compressed data, set to false if transferring data using gzip compression. - `disableCompression` whether to accept compressed data, default is true do not accept compressed data, set to false if transferring data using gzip compression.
* `readBufferSize` The default size of the buffer for reading data is 4K (4096), which can be adjusted upwards when the query result has a lot of data. - `readBufferSize` The default size of the buffer for reading data is 4K (4096), which can be adjusted upwards when the query result has a lot of data.
Example. Example.
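For illustration, a minimal sketch of a `taosRestful` DSN that sets the two parameters listed above; the taosAdapter address (localhost:6041) and credentials are assumptions.

```go
package main

import (
	"database/sql"
	"fmt"

	_ "github.com/taosdata/driver-go/v2/taosRestful"
)

func main() {
	// disableCompression=false asks for gzip-compressed results;
	// readBufferSize is enlarged for queries that return large result sets.
	dsn := "root:taosdata@http(localhost:6041)/?disableCompression=false&readBufferSize=102400"
	db, err := sql.Open("taosRestful", dsn)
	if err != nil {
		fmt.Println("failed to connect:", err)
		return
	}
	defer db.Close()
}
```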
...@@ -179,6 +179,7 @@ func main() { ...@@ -179,6 +179,7 @@ func main() {
} }
} }
``` ```
</TabItem> </TabItem>
</Tabs> </Tabs>
...@@ -208,8 +209,8 @@ func main() { ...@@ -208,8 +209,8 @@ func main() {
### More sample programs ### More sample programs
* [sample program](https://github.com/taosdata/TDengine/tree/develop/examples/go) - [sample program](https://github.com/taosdata/TDengine/tree/develop/examples/go)
* [Video tutorial](https://www.taosdata.com/blog/2020/11/11/1951.html). - [Video tutorial](https://www.taosdata.com/blog/2020/11/11/1951.html).
## Usage limitations ## Usage limitations
...@@ -269,56 +270,52 @@ func main() { ...@@ -269,56 +270,52 @@ func main() {
## Frequently Asked Questions ## Frequently Asked Questions
1. Cannot find the package `github.com/taosdata/driver-go/v2/taosRestful` 1. bind interface in database/sql crashes
Change the `github.com/taosdata/driver-go/v2` line in the require block of the `go.mod` file to `github.com/taosdata/driver-go/v2 develop`, then execute `go mod tidy`.
2. bind interface in database/sql crashes
REST does not support parameter binding related interface. It is recommended to use `db.Exec` and `db.Query`. REST does not support parameter binding related interface. It is recommended to use `db.Exec` and `db.Query`.
3. error `[0x217] Database not specified or available` after executing other statements with `use db` statement 2. error `[0x217] Database not specified or available` after executing other statements with `use db` statement
The execution of SQL command in the REST interface is not contextual, so using `use db` statement will not work, see the usage restrictions section above. The execution of SQL command in the REST interface is not contextual, so using `use db` statement will not work, see the usage restrictions section above.
4. use `taosSql` without error but use `taosRestful` with error `[0x217] Database not specified or available` 3. use `taosSql` without error but use `taosRestful` with error `[0x217] Database not specified or available`
Because the REST interface is stateless, using the `use db` statement will not take effect. See the usage restrictions section above. Because the REST interface is stateless, using the `use db` statement will not take effect. See the usage restrictions section above.
5. Upgrade `github.com/taosdata/driver-go/v2/taosRestful` 4. Upgrade `github.com/taosdata/driver-go/v2/taosRestful`
Change the `github.com/taosdata/driver-go/v2` line in the `go.mod` file to `github.com/taosdata/driver-go/v2 develop`, then execute `go mod tidy`. Change the `github.com/taosdata/driver-go/v2` line in the `go.mod` file to `github.com/taosdata/driver-go/v2 develop`, then execute `go mod tidy`.
6. `readBufferSize` parameter has no significant effect after being increased 5. `readBufferSize` parameter has no significant effect after being increased
Increasing `readBufferSize` will reduce the number of `syscall` calls when fetching results. If the query result is smaller, modifying this parameter will not improve performance significantly. If you increase the parameter value too much, the bottleneck will be parsing JSON data. If you need to optimize the query speed, you must adjust the value based on the actual situation to achieve the best query performance. Increasing `readBufferSize` will reduce the number of `syscall` calls when fetching results. If the query result is smaller, modifying this parameter will not improve performance significantly. If you increase the parameter value too much, the bottleneck will be parsing JSON data. If you need to optimize the query speed, you must adjust the value based on the actual situation to achieve the best query performance.
7. `disableCompression` parameter is set to `false` when the query efficiency is reduced 6. `disableCompression` parameter is set to `false` when the query efficiency is reduced
When set `disableCompression` parameter to `false`, the query result will be compressed by `gzip` and then transmitted, so you have to decompress the data by `gzip` after getting it. When set `disableCompression` parameter to `false`, the query result will be compressed by `gzip` and then transmitted, so you have to decompress the data by `gzip` after getting it.
8. `go get` command can't get the package, or timeout to get the package 7. `go get` command can't get the package, or timeout to get the package
Set Go proxy `go env -w GOPROXY=https://goproxy.cn,direct`. Set Go proxy `go env -w GOPROXY=https://goproxy.cn,direct`.
## Common APIs ## Common APIs
### database/sql API ### database/sql API
* `sql.Open(DRIVER_NAME string, dataSourceName string) *DB` - `sql.Open(DRIVER_NAME string, dataSourceName string) *DB`
Use This API to open a DB, returning an object of type \*DB. Use This API to open a DB, returning an object of type \*DB.
:::info :::info
This API is created successfully without checking permissions, but only when you execute a Query or Exec, and check if user/password/host/port is legal. This API is created successfully without checking permissions, but only when you execute a Query or Exec, and check if user/password/host/port is legal.
::: :::
* `func (db *DB) Exec(query string, args ...interface{}) (Result, error)` - `func (db *DB) Exec(query string, args ...interface{}) (Result, error)`
`sql.Open` built-in method to execute non-query related SQL. `sql.Open` built-in method to execute non-query related SQL.
* `func (db *DB) Query(query string, args ...interface{}) (*Rows, error)` - `func (db *DB) Query(query string, args ...interface{}) (*Rows, error)`
`sql.Open` Built-in method to execute query statements. `sql.Open` Built-in method to execute query statements.
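For illustration, a minimal sketch combining the three calls; the DSN and database name are assumptions, and the blank import is assumed to register the `taosRestful` driver name.

```go
package main

import (
	"database/sql"
	"fmt"

	_ "github.com/taosdata/driver-go/v2/taosRestful"
)

func main() {
	db, err := sql.Open("taosRestful", "root:taosdata@http(localhost:6041)/")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Exec runs non-query SQL, such as creating a database.
	if _, err := db.Exec("CREATE DATABASE IF NOT EXISTS example_db"); err != nil {
		panic(err)
	}

	// Query runs statements that return rows.
	rows, err := db.Query("SHOW DATABASES")
	if err != nil {
		panic(err)
	}
	defer rows.Close()

	count := 0
	for rows.Next() {
		count++
	}
	fmt.Println("databases:", count)
}
```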
...@@ -328,85 +325,85 @@ The `af` package encapsulates TDengine advanced functions such as connection man ...@@ -328,85 +325,85 @@ The `af` package encapsulates TDengine advanced functions such as connection man
#### Connection management #### Connection management
* `af.Open(host, user, pass, db string, port int) (*Connector, error)` - `af.Open(host, user, pass, db string, port int) (*Connector, error)`
This API creates a connection to taosd via cgo. This API creates a connection to taosd via cgo.
* `func (conn *Connector) Close() error` - `func (conn *Connector) Close() error`
Closes the connection. Closes the connection.
#### Subscribe to #### Subscribe to
* `func (conn *Connector) Subscribe(restart bool, topic string, sql string, interval time.Duration) (Subscriber, error)` - `func (conn *Connector) Subscribe(restart bool, topic string, sql string, interval time.Duration) (Subscriber, error)`
Subscribe to data. Subscribe to data.
* `func (s *taosSubscriber) Consume() (driver.Rows, error)` - `func (s *taosSubscriber) Consume() (driver.Rows, error)`
Consume the subscription data, returning the `Rows` structure of the `database/sql/driver` package. Consume the subscription data, returning the `Rows` structure of the `database/sql/driver` package.
* `func (s *taosSubscriber) Unsubscribe(keepProgress bool)` - `func (s *taosSubscriber) Unsubscribe(keepProgress bool)`
Unsubscribe from data. Unsubscribe from data.
#### schemaless #### schemaless
* `func (conn *Connector) InfluxDBInsertLines(lines []string, precision string) error` - `func (conn *Connector) InfluxDBInsertLines(lines []string, precision string) error`
Write to influxDB line protocol. Write to influxDB line protocol.
* `func (conn *Connector) OpenTSDBInsertTelnetLines(lines []string) error` - `func (conn *Connector) OpenTSDBInsertTelnetLines(lines []string) error`
Write OpenTDSB telnet protocol data. Write OpenTDSB telnet protocol data.
* `func (conn *Connector) OpenTSDBInsertJsonPayload(payload string) error` - `func (conn *Connector) OpenTSDBInsertJsonPayload(payload string) error`
Writes OpenTSDB JSON protocol data. Writes OpenTSDB JSON protocol data.
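For illustration, a hedged sketch of writing one InfluxDB line-protocol record through the `af` package; the import path, connection parameters, sample record, and the "ms" precision value are assumptions rather than values from this page.

```go
package main

import (
	"github.com/taosdata/driver-go/v2/af"
)

func main() {
	// af.Open(host, user, pass, db, port) -- values assume a default local
	// install and an existing database named "test".
	conn, err := af.Open("localhost", "root", "taosdata", "test", 6030)
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	lines := []string{
		// InfluxDB line protocol: measurement,tags fields timestamp
		"meters,location=SanFrancisco,groupid=2 current=10.3,voltage=219i,phase=0.31 1648432611249",
	}
	// "ms" matches the millisecond timestamp above (assumed precision value).
	if err := conn.InfluxDBInsertLines(lines, "ms"); err != nil {
		panic(err)
	}
}
```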
#### parameter binding #### parameter binding
* `func (conn *Connector) StmtExecute(sql string, params *param.Param) (res driver.Result, err error)` - `func (conn *Connector) StmtExecute(sql string, params *param.Param) (res driver.Result, err error)`
Parameter bound single row insert. Parameter bound single row insert.
* `func (conn *Connector) StmtQuery(sql string, params *param.Param) (rows driver.Rows, err error)` - `func (conn *Connector) StmtQuery(sql string, params *param.Param) (rows driver.Rows, err error)`
Parameter bound query that returns the `Rows` structure of the `database/sql/driver` package. Parameter bound query that returns the `Rows` structure of the `database/sql/driver` package.
* `func (conn *Connector) InsertStmt() *insertstmt. - `func (conn *Connector) InsertStmt() *insertstmt.
Initialize the parameters. Initialize the parameters.
* `func (stmt *InsertStmt) Prepare(sql string) error` - `func (stmt *InsertStmt) Prepare(sql string) error`
Parameter binding preprocessing SQL statement. Parameter binding preprocessing SQL statement.
* `func (stmt *InsertStmt) SetTableName(name string) error` - `func (stmt *InsertStmt) SetTableName(name string) error`
Bind the set table name parameter. Bind the set table name parameter.
* `func (stmt *InsertStmt) SetSubTableName(name string) error` - `func (stmt *InsertStmt) SetSubTableName(name string) error`
Parameter binding to set the sub table name. Parameter binding to set the sub table name.
* `func (stmt *InsertStmt) BindParam(params []*param.Param, bindType *param.ColumnType) error` - `func (stmt *InsertStmt) BindParam(params []*param.Param, bindType *param.ColumnType) error`
Parameter bind multiple rows of data. Parameter bind multiple rows of data.
* `func (stmt *InsertStmt) AddBatch() error` - `func (stmt *InsertStmt) AddBatch() error`
Add to a parameter-bound batch. Add to a parameter-bound batch.
* `func (stmt *InsertStmt) Execute() error` - `func (stmt *InsertStmt) Execute() error`
Execute a parameter binding. Execute a parameter binding.
* `func (stmt *InsertStmt) GetAffectedRows() int` - `func (stmt *InsertStmt) GetAffectedRows() int`
Gets the number of affected rows inserted by the parameter binding. Gets the number of affected rows inserted by the parameter binding.
* `func (stmt *InsertStmt) Close() error` - `func (stmt *InsertStmt) Close() error`
Closes the parameter binding. Closes the parameter binding.
......
...@@ -379,11 +379,11 @@ We still use the hypothetical environment from Chapter 4. There are three measur ...@@ -379,11 +379,11 @@ We still use the hypothetical environment from Chapter 4. There are three measur
### Storage resource estimation ### Storage resource estimation
Assuming that the number of sensor devices that generate data and need to be stored is `n`, the frequency of data generation is `t` per second, and the length of each record is `L` bytes, the scale of data generated per day is `n * t * L` bytes. Assuming the compression ratio is `C`, the daily data size is `(n * t * L)/C` bytes. The storage resources are estimated to accommodate the data scale for 1.5 years. In the production environment, the compression ratio C of TDengine is generally between 5 and 7. Assuming that the number of sensor devices that generate data and need to be stored is `n`, the frequency of data generation is `t` per second, and the length of each record is `L` bytes, the scale of data generated per day is `86400 * n * t * L` bytes. Assuming the compression ratio is `C`, the daily data size is `(86400 * n * t * L)/C` bytes. The storage resources are estimated to accommodate the data scale for 1.5 years. In the production environment, the compression ratio C of TDengine is generally between 5 and 7.
With additional 20% redundancy, you can calculate the required storage resources: With additional 20% redundancy, you can calculate the required storage resources:
```matlab ```matlab
(n * t * L) * (365 * 1.5) * (1+20%)/C (86400 * n * t * L) * (365 * 1.5) * (1+20%)/C
```` ````
Substituting in the above formula, the raw data generated every year is 11.8TB without considering the label information. Note that tag information is associated with each timeline in TDengine, not every record. The amount of data to be recorded is somewhat reduced relative to the generated data, and label data can be ignored as a whole. Assuming a compression ratio of 5, the size of the retained data ends up being 2.56 TB. Substituting in the above formula, the raw data generated every year is 11.8TB without considering the label information. Note that tag information is associated with each timeline in TDengine, not every record. The amount of data to be recorded is somewhat reduced relative to the generated data, and label data can be ignored as a whole. Assuming a compression ratio of 5, the size of the retained data ends up being 2.56 TB.
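As a quick illustration of the arithmetic, the sketch below plugs placeholder values into the formula; the device count, write rate, record length, and compression ratio are assumptions, not the Chapter 4 figures.

```go
package main

import "fmt"

func main() {
	// Placeholder inputs -- replace with your own environment's numbers.
	n := 10000.0 // number of devices
	t := 1.0     // records per device per second
	L := 128.0   // bytes per record
	C := 5.0     // compression ratio

	rawPerDay := 86400 * n * t * L          // bytes generated per day
	storedPerDay := rawPerDay / C           // bytes kept per day after compression
	total := storedPerDay * 365 * 1.5 * 1.2 // 1.5 years of retention plus 20% redundancy

	fmt.Printf("raw per day: %.1f GB, storage needed: %.2f TB\n",
		rawPerDay/1e9, total/1e12)
}
```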
......
.vscode .vscode
*.lock *.lock
.idea .idea
\ No newline at end of file .env
\ No newline at end of file
.vscode
*.lock
\ No newline at end of file
...@@ -2,5 +2,4 @@ module goexample ...@@ -2,5 +2,4 @@ module goexample
go 1.17 go 1.17
require github.com/taosdata/driver-go/v2 develop require github.com/taosdata/driver-go/v2 latest
...@@ -24,6 +24,16 @@ ...@@ -24,6 +24,16 @@
<version>2.0.38</version> <version>2.0.38</version>
</dependency> </dependency>
<!-- ANCHOR_END: dep--> <!-- ANCHOR_END: dep-->
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>1.7.36</version>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-classic</artifactId>
<version>1.2.11</version>
</dependency>
<dependency> <dependency>
<groupId>junit</groupId> <groupId>junit</groupId>
<artifactId>junit</artifactId> <artifactId>junit</artifactId>
...@@ -31,5 +41,36 @@ ...@@ -31,5 +41,36 @@
<scope>test</scope> <scope>test</scope>
</dependency> </dependency>
</dependencies> </dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<version>2.5</version>
<configuration>
<skipTests>true</skipTests>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
<executions>
<execution>
<id>copy-dependencies</id>
<phase>prepare-package</phase>
<goals>
<goal>copy-dependencies</goal>
</goals>
<configuration>
<outputDirectory>${project.build.directory}/lib</outputDirectory>
<overWriteReleases>false</overWriteReleases>
<overWriteSnapshots>false</overWriteSnapshots>
<overWriteIfNewer>true</overWriteIfNewer>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project> </project>
package com.taos.example.highvolume;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.sql.*;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
/**
* Prepare target database.
* Count total records in database periodically so that we can estimate the writing speed.
*/
class DataBaseMonitor {
private Connection conn;
private Statement stmt;
public DataBaseMonitor init() throws SQLException {
if (conn == null) {
String jdbcURL = System.getenv("TDENGINE_JDBC_URL");
conn = DriverManager.getConnection(jdbcURL);
stmt = conn.createStatement();
}
return this;
}
public void close() {
try {
stmt.close();
} catch (SQLException e) {
}
try {
conn.close();
} catch (SQLException e) {
}
}
public void prepareDatabase() throws SQLException {
stmt.execute("DROP DATABASE IF EXISTS test");
stmt.execute("CREATE DATABASE test");
stmt.execute("CREATE STABLE test.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)");
}
public Long count() throws SQLException {
if (!stmt.isClosed()) {
ResultSet result = stmt.executeQuery("SELECT count(*) from test.meters");
result.next();
return result.getLong(1);
}
return null;
}
}
// ANCHOR: main
public class FastWriteExample {
final static Logger logger = LoggerFactory.getLogger(FastWriteExample.class);
final static int taskQueueCapacity = 10000000;
final static List<BlockingQueue<String>> taskQueues = new ArrayList<>();
final static List<ReadTask> readTasks = new ArrayList<>();
final static List<WriteTask> writeTasks = new ArrayList<>();
final static DataBaseMonitor databaseMonitor = new DataBaseMonitor();
public static void stopAll() {
logger.info("shutting down");
readTasks.forEach(task -> task.stop());
writeTasks.forEach(task -> task.stop());
databaseMonitor.close();
}
public static void main(String[] args) throws InterruptedException, SQLException {
int readTaskCount = args.length > 0 ? Integer.parseInt(args[0]) : 1;
int writeTaskCount = args.length > 1 ? Integer.parseInt(args[1]) : 3;
int tableCount = args.length > 2 ? Integer.parseInt(args[2]) : 1000;
int maxBatchSize = args.length > 3 ? Integer.parseInt(args[3]) : 3000;
logger.info("readTaskCount={}, writeTaskCount={} tableCount={} maxBatchSize={}",
readTaskCount, writeTaskCount, tableCount, maxBatchSize);
databaseMonitor.init().prepareDatabase();
// Create task queues, writing tasks, and start writing threads.
for (int i = 0; i < writeTaskCount; ++i) {
BlockingQueue<String> queue = new ArrayBlockingQueue<>(taskQueueCapacity);
taskQueues.add(queue);
WriteTask task = new WriteTask(queue, maxBatchSize);
Thread t = new Thread(task);
t.setName("WriteThread-" + i);
t.start();
}
// create reading tasks and start reading threads
int tableCountPerTask = tableCount / readTaskCount;
for (int i = 0; i < readTaskCount; ++i) {
ReadTask task = new ReadTask(i, taskQueues, tableCountPerTask);
Thread t = new Thread(task);
t.setName("ReadThread-" + i);
t.start();
}
Runtime.getRuntime().addShutdownHook(new Thread(FastWriteExample::stopAll));
long lastCount = 0;
while (true) {
Thread.sleep(10000);
long count = databaseMonitor.count();
logger.info("count={} speed={}", count, (count - lastCount) / 10);
lastCount = count;
}
}
}
// ANCHOR_END: main
\ No newline at end of file
package com.taos.example.highvolume;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.BlockingQueue;
/**
* Generate test data
*/
class MockDataSource implements Iterator {
private String tbNamePrefix;
private int tableCount;
private long maxRowsPerTable = 1000000000L;
// 100 milliseconds between two neighbouring rows.
long startMs = System.currentTimeMillis() - maxRowsPerTable * 100;
private int currentRow = 0;
private int currentTbId = -1;
// mock values
String[] location = {"LosAngeles", "SanDiego", "Hollywood", "Compton", "San Francisco"};
float[] current = {8.8f, 10.7f, 9.9f, 8.9f, 9.4f};
int[] voltage = {119, 116, 111, 113, 118};
float[] phase = {0.32f, 0.34f, 0.33f, 0.329f, 0.141f};
public MockDataSource(String tbNamePrefix, int tableCount) {
this.tbNamePrefix = tbNamePrefix;
this.tableCount = tableCount;
}
@Override
public boolean hasNext() {
currentTbId += 1;
if (currentTbId == tableCount) {
currentTbId = 0;
currentRow += 1;
}
return currentRow < maxRowsPerTable;
}
@Override
public String next() {
long ts = startMs + 100 * currentRow;
int groupId = currentTbId % 5 == 0 ? currentTbId / 5 : currentTbId / 5 + 1;
StringBuilder sb = new StringBuilder(tbNamePrefix + "_" + currentTbId + ","); // tbName
sb.append(ts).append(','); // ts
sb.append(current[currentRow % 5]).append(','); // current
sb.append(voltage[currentRow % 5]).append(','); // voltage
sb.append(phase[currentRow % 5]).append(','); // phase
sb.append(location[currentRow % 5]).append(','); // location
sb.append(groupId); // groupID
return sb.toString();
}
}
// ANCHOR: ReadTask
class ReadTask implements Runnable {
private final static Logger logger = LoggerFactory.getLogger(ReadTask.class);
private final int taskId;
private final List<BlockingQueue<String>> taskQueues;
private final int queueCount;
private final int tableCount;
private boolean active = true;
public ReadTask(int readTaskId, List<BlockingQueue<String>> queues, int tableCount) {
this.taskId = readTaskId;
this.taskQueues = queues;
this.queueCount = queues.size();
this.tableCount = tableCount;
}
/**
* Assign data received to different queues.
* Here we use the suffix number in table name.
* You are expected to define your own rule in practice.
*
* @param line record received
* @return which queue to use
*/
public int getQueueId(String line) {
String tbName = line.substring(0, line.indexOf(',')); // For example: tb1_101
String suffixNumber = tbName.split("_")[1];
return Integer.parseInt(suffixNumber) % this.queueCount;
}
@Override
public void run() {
logger.info("started");
Iterator<String> it = new MockDataSource("tb" + this.taskId, tableCount);
try {
while (it.hasNext() && active) {
String line = it.next();
int queueId = getQueueId(line);
taskQueues.get(queueId).put(line);
}
} catch (Exception e) {
logger.error("Read Task Error", e);
}
}
public void stop() {
logger.info("stop");
this.active = false;
}
}
// ANCHOR_END: ReadTask
\ No newline at end of file
package com.taos.example.highvolume;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.sql.*;
import java.util.HashMap;
import java.util.Map;
// ANCHOR: SQLWriter
/**
* A helper class that encapsulates the logic of writing with SQL.
* <p>
* The main interfaces are two methods:
* <ol>
* <li>{@link SQLWriter#processLine}, which receives raw lines from WriteTask and groups them by table name.</li>
* <li>{@link SQLWriter#flush}, which assembles the INSERT statement and executes it.</li>
* </ol>
* <p>
* One technique worth mentioning: tables are created on demand when a "table does not exist" error occurs, instead of creating tables automatically with the "INSERT INTO tb USING stb" syntax.
* This ensures that checking table existence is a one-time-only operation.
* </p>
*/
public class SQLWriter {
final static Logger logger = LoggerFactory.getLogger(SQLWriter.class);
private Connection conn;
private Statement stmt;
/**
* current number of buffered records
*/
private int bufferedCount = 0;
/**
* Maximum number of buffered records.
* A flush is triggered when bufferedCount reaches this value.
*/
private int maxBatchSize;
/**
* Maximum SQL length.
*/
private int maxSQLLength;
/**
* Map from table name to column values. For example:
* "tb001" -> "(1648432611249,2.1,114,0.09) (1648432611250,2.2,135,0.2)"
*/
private Map<String, String> tbValues = new HashMap<>();
/**
* Map from table name to tag values in the same order as creating stable.
* Used for creating table.
*/
private Map<String, String> tbTags = new HashMap<>();
public SQLWriter(int maxBatchSize) {
this.maxBatchSize = maxBatchSize;
}
/**
* Get Database Connection
*
* @return Connection
* @throws SQLException
*/
private static Connection getConnection() throws SQLException {
String jdbcURL = System.getenv("TDENGINE_JDBC_URL");
return DriverManager.getConnection(jdbcURL);
}
/**
* Create Connection and Statement
*
* @throws SQLException
*/
public void init() throws SQLException {
conn = getConnection();
stmt = conn.createStatement();
stmt.execute("use test");
ResultSet rs = stmt.executeQuery("show variables");
while (rs.next()) {
String configName = rs.getString(1);
if ("maxSQLLength".equals(configName)) {
maxSQLLength = Integer.parseInt(rs.getString(2));
logger.info("maxSQLLength={}", maxSQLLength);
}
}
}
/**
* Convert raw data to SQL fragments, group them by table name and cache them in a HashMap.
* Trigger writing when the number of buffered records reaches maxBatchSize.
*
* @param line raw data taken from the task queue, in the format: tbName,ts,current,voltage,phase,location,groupId
*/
public void processLine(String line) throws SQLException {
bufferedCount += 1;
int firstComma = line.indexOf(',');
String tbName = line.substring(0, firstComma);
int lastComma = line.lastIndexOf(',');
int secondLastComma = line.lastIndexOf(',', lastComma - 1);
String value = "(" + line.substring(firstComma + 1, secondLastComma) + ") ";
if (tbValues.containsKey(tbName)) {
tbValues.put(tbName, tbValues.get(tbName) + value);
} else {
tbValues.put(tbName, value);
}
if (!tbTags.containsKey(tbName)) {
String location = line.substring(secondLastComma + 1, lastComma);
String groupId = line.substring(lastComma + 1);
String tagValues = "('" + location + "'," + groupId + ')';
tbTags.put(tbName, tagValues);
}
if (bufferedCount == maxBatchSize) {
flush();
}
}
/**
* Assemble the INSERT statement from the buffered SQL fragments in {@link SQLWriter#tbValues} and execute it.
* In case of a "Table does not exist" exception, create all tables referenced in the SQL and retry it.
*/
public void flush() throws SQLException {
StringBuilder sb = new StringBuilder("INSERT INTO ");
for (Map.Entry<String, String> entry : tbValues.entrySet()) {
String tableName = entry.getKey();
String values = entry.getValue();
String q = tableName + " values " + values + " ";
if (sb.length() + q.length() > maxSQLLength) {
executeSQL(sb.toString());
logger.warn("increase maxSQLLength or decrease maxBatchSize to gain better performance");
sb = new StringBuilder("INSERT INTO ");
}
sb.append(q);
}
executeSQL(sb.toString());
tbValues.clear();
bufferedCount = 0;
}
private void executeSQL(String sql) throws SQLException {
try {
stmt.executeUpdate(sql);
} catch (SQLException e) {
// convert to error code defined in taoserror.h
int errorCode = e.getErrorCode() & 0xffff;
if (errorCode == 0x362 || errorCode == 0x218) {
// Table does not exist
createTables();
stmt.executeUpdate(sql);
} else {
throw e;
}
}
}
/**
* Create tables in batch using syntax:
* <p>
* CREATE TABLE [IF NOT EXISTS] tb_name1 USING stb_name TAGS (tag_value1, ...) [IF NOT EXISTS] tb_name2 USING stb_name TAGS (tag_value2, ...) ...;
* </p>
*/
private void createTables() throws SQLException {
StringBuilder sb = new StringBuilder("CREATE TABLE ");
for (String tbName : tbValues.keySet()) {
String tagValues = tbTags.get(tbName);
sb.append("IF NOT EXISTS ").append(tbName).append(" USING meters TAGS ").append(tagValues).append(" ");
}
String sql = sb.toString();
stmt.executeUpdate(sql);
}
public boolean hasBufferedValues() {
return bufferedCount > 0;
}
public int getBufferedCount() {
return bufferedCount;
}
public void close() {
try {
stmt.close();
} catch (SQLException e) {
}
try {
conn.close();
} catch (SQLException e) {
}
}
}
// ANCHOR_END: SQLWriter
package com.taos.example.highvolume;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.BlockingQueue;
// ANCHOR: WriteTask
class WriteTask implements Runnable {
private final static Logger logger = LoggerFactory.getLogger(WriteTask.class);
private final int maxBatchSize;
// the queue from which this writing task gets raw data.
private final BlockingQueue<String> queue;
// A flag indicating whether to continue.
private boolean active = true;
public WriteTask(BlockingQueue<String> taskQueue, int maxBatchSize) {
this.queue = taskQueue;
this.maxBatchSize = maxBatchSize;
}
@Override
public void run() {
logger.info("started");
String line = null; // the line most recently taken from the queue.
SQLWriter writer = new SQLWriter(maxBatchSize);
try {
writer.init();
while (active) {
line = queue.poll();
if (line != null) {
// parse raw data and buffer the data.
writer.processLine(line);
} else if (writer.hasBufferedValues()) {
// write data immediately if no more data in the queue
writer.flush();
} else {
// sleep a while to avoid high CPU usage when the queue is empty and there are no buffered records.
Thread.sleep(100);
}
}
if (writer.hasBufferedValues()) {
writer.flush();
}
} catch (Exception e) {
String msg = String.format("line=%s, bufferedCount=%s", line, writer.getBufferedCount());
logger.error(msg, e);
} finally {
writer.close();
}
}
public void stop() {
logger.info("stop");
this.active = false;
}
}
// ANCHOR_END: WriteTask
\ No newline at end of file
# install dependencies:
# recommend python >= 3.8
# pip3 install faster-fifo
#
import logging
import sys
import time
import os
from multiprocessing import Process
from faster_fifo import Queue
from queue import Empty
from typing import List
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format="%(asctime)s [%(name)s] - %(message)s")
READ_TASK_COUNT = 1
WRITE_TASK_COUNT = 1
TABLE_COUNT = 1000
QUEUE_SIZE = 1000000
MAX_BATCH_SIZE = 3000
read_processes = []
write_processes = []
def get_connection():
"""
If variable TDENGINE_FIRST_EP is provided then it will be used. If not, firstEP in /etc/taos/taos.cfg will be used.
You can also override the default username and password by supplying the variables TDENGINE_USER and TDENGINE_PASSWORD.
"""
import taos
firstEP = os.environ.get("TDENGINE_FIRST_EP")
if firstEP:
host, port = firstEP.split(":")
else:
host, port = None, 0
user = os.environ.get("TDENGINE_USER", "root")
password = os.environ.get("TDENGINE_PASSWORD", "taosdata")
return taos.connect(host=host, port=int(port), user=user, password=password)
# ANCHOR: MockDataSource
class MockDataSource:
location = ["LosAngeles", "SanDiego", "Hollywood", "Compton", "San Francisco"]
current = [8.8, 10.7, 9.9, 8.9, 9.4]
voltage = [119, 116, 111, 113, 118]
phase = [0.32, 0.34, 0.33, 0.329, 0.141]
max_rows_per_table = 10 ** 9
def __init__(self, tb_name_prefix, table_count):
self.table_name_prefix = tb_name_prefix
self.table_count = table_count
self.start_ms = round(time.time() * 1000) - self.max_rows_per_table * 100
def __iter__(self):
self.row = 0
self.table_id = -1
return self
def __next__(self):
"""
next 100 rows of current table
"""
self.table_id += 1
if self.table_id == self.table_count:
self.table_id = 0
if self.row >= self.max_rows_per_table:
raise StopIteration
rows = []
while len(rows) < 100:
self.row += 1
ts = self.start_ms + 100 * self.row
group_id = self.table_id % 5 if self.table_id % 5 == 0 else self.table_id % 5 + 1
tb_name = self.table_name_prefix + '_' + str(self.table_id)
ri = self.row % 5
rows.append(f"{tb_name},{ts},{self.current[ri]},{self.voltage[ri]},{self.phase[ri]},{self.location[ri]},{group_id}")
return self.table_id, rows
# ANCHOR_END: MockDataSource
# ANCHOR: read
def run_read_task(task_id: int, task_queues: List[Queue]):
table_count_per_task = TABLE_COUNT // READ_TASK_COUNT
data_source = MockDataSource(f"tb{task_id}", table_count_per_task)
try:
for table_id, rows in data_source:
# hash data to different queue
i = table_id % len(task_queues)
# block putting forever when the queue is full
task_queues[i].put_many(rows, block=True, timeout=-1)
except KeyboardInterrupt:
pass
# ANCHOR_END: read
# ANCHOR: write
def run_write_task(task_id: int, queue: Queue):
from sql_writer import SQLWriter
log = logging.getLogger(f"WriteTask-{task_id}")
writer = SQLWriter(get_connection)
lines = None
try:
while True:
try:
# get as many as possible
lines = queue.get_many(block=False, max_messages_to_get=MAX_BATCH_SIZE)
writer.process_lines(lines)
except Empty:
time.sleep(0.01)
except KeyboardInterrupt:
pass
except BaseException as e:
log.debug(f"lines={lines}")
raise e
# ANCHOR_END: write
def set_global_config():
argc = len(sys.argv)
if argc > 1:
global READ_TASK_COUNT
READ_TASK_COUNT = int(sys.argv[1])
if argc > 2:
global WRITE_TASK_COUNT
WRITE_TASK_COUNT = int(sys.argv[2])
if argc > 3:
global TABLE_COUNT
TABLE_COUNT = int(sys.argv[3])
if argc > 4:
global QUEUE_SIZE
QUEUE_SIZE = int(sys.argv[4])
if argc > 5:
global MAX_BATCH_SIZE
MAX_BATCH_SIZE = int(sys.argv[5])
# ANCHOR: monitor
def run_monitor_process():
import taos
log = logging.getLogger("DataBaseMonitor")
conn = get_connection()
conn.execute("DROP DATABASE IF EXISTS test")
conn.execute("CREATE DATABASE test")
conn.execute("CREATE STABLE test.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) "
"TAGS (location BINARY(64), groupId INT)")
def get_count():
res = conn.query("SELECT count(*) FROM test.meters")
rows = res.fetch_all()
return rows[0][0] if rows else 0
last_count = 0
while True:
time.sleep(10)
count = get_count()
log.info(f"count={count} speed={(count - last_count) / 10}")
last_count = count
# ANCHOR_END: monitor
# ANCHOR: main
def main():
set_global_config()
logging.info(f"READ_TASK_COUNT={READ_TASK_COUNT}, WRITE_TASK_COUNT={WRITE_TASK_COUNT}, "
f"TABLE_COUNT={TABLE_COUNT}, QUEUE_SIZE={QUEUE_SIZE}, MAX_BATCH_SIZE={MAX_BATCH_SIZE}")
monitor_process = Process(target=run_monitor_process)
monitor_process.start()
    time.sleep(3)  # wait for the database to be ready
task_queues: List[Queue] = []
# create task queues
for i in range(WRITE_TASK_COUNT):
queue = Queue(max_size_bytes=QUEUE_SIZE)
task_queues.append(queue)
# create write processes
for i in range(WRITE_TASK_COUNT):
p = Process(target=run_write_task, args=(i, task_queues[i]))
p.start()
logging.debug(f"WriteTask-{i} started with pid {p.pid}")
write_processes.append(p)
# create read processes
for i in range(READ_TASK_COUNT):
p = Process(target=run_read_task, args=(i, task_queues))
p.start()
logging.debug(f"ReadTask-{i} started with pid {p.pid}")
read_processes.append(p)
try:
monitor_process.join()
except KeyboardInterrupt:
monitor_process.terminate()
[p.terminate() for p in read_processes]
[p.terminate() for p in write_processes]
[q.close() for q in task_queues]
if __name__ == '__main__':
main()
# ANCHOR_END: main
import logging
import taos
class SQLWriter:
log = logging.getLogger("SQLWriter")
def __init__(self, get_connection_func):
self._tb_values = {}
self._tb_tags = {}
self._conn = get_connection_func()
self._max_sql_length = self.get_max_sql_length()
self._conn.execute("USE test")
    def get_max_sql_length(self):
        rows = self._conn.query("SHOW variables").fetch_all()
        for r in rows:
            name = r[0]
            if name == "maxSQLLength":
                return int(r[1])
        # fallback used only if the server does not report maxSQLLength;
        # 65480 is an assumed conservative value and can be adjusted if needed
        return 65480
    def process_lines(self, lines: list):
"""
:param lines: [[tbName,ts,current,voltage,phase,location,groupId]]
"""
for line in lines:
ps = line.split(",")
table_name = ps[0]
value = '(' + ",".join(ps[1:-2]) + ') '
if table_name in self._tb_values:
self._tb_values[table_name] += value
else:
self._tb_values[table_name] = value
if table_name not in self._tb_tags:
location = ps[-2]
group_id = ps[-1]
tag_value = f"('{location}',{group_id})"
self._tb_tags[table_name] = tag_value
self.flush()
def flush(self):
"""
Assemble INSERT statement and execute it.
        When the length of the SQL grows close to maxSQLLength, the SQL is executed immediately and a new INSERT statement is started.
        In case of a "Table does not exist" error, the tables referenced in the SQL are created and the SQL is re-executed.
"""
sql = "INSERT INTO "
sql_len = len(sql)
buf = []
for tb_name, values in self._tb_values.items():
q = tb_name + " VALUES " + values
if sql_len + len(q) >= self._max_sql_length:
sql += " ".join(buf)
self.execute_sql(sql)
sql = "INSERT INTO "
sql_len = len(sql)
buf = []
buf.append(q)
sql_len += len(q)
sql += " ".join(buf)
self.execute_sql(sql)
self._tb_values.clear()
    def execute_sql(self, sql):
        try:
            self._conn.execute(sql)
        except taos.Error as e:
            error_code = e.errno & 0xffff
            # Table does not exist: create the missing tables from the supertable
            # template, then retry the INSERT once.
            if error_code == 0x362 or error_code == 0x218:
                self.create_tables()
                self._conn.execute(sql)
            else:
                raise e
def create_tables(self):
sql = "CREATE TABLE "
for tb in self._tb_values.keys():
tag_values = self._tb_tags[tb]
sql += "IF NOT EXISTS " + tb + " USING meters TAGS " + tag_values + " "
self._conn.execute(sql)
...@@ -99,11 +99,7 @@ pip install git+https://github.com/taosdata/taos-connector-python.git ...@@ -99,11 +99,7 @@ pip install git+https://github.com/taosdata/taos-connector-python.git
编辑 `go.mod` 添加 `driver-go` 依赖即可。 编辑 `go.mod` 添加 `driver-go` 依赖即可。
```go-mod title=go.mod ```go-mod title=go.mod
module goexample {{#include docs/examples/go/go.mod}}
go 1.17
require github.com/taosdata/driver-go/v2 develop
``` ```
:::note :::note
......
---
title: Efficient Writing
---
## How Efficient Writing Works {#principle}
This section describes how to write data into TDengine efficiently. Several factors affect write efficiency: how the data is distributed across different tables (or subtables), i.e. the adjacency of the data being written; the amount of data written per request; and the number of concurrent connections. In general, writing each batch to a single table (or subtable) is more efficient than spreading it across multiple tables (or subtables); the larger each batch, the more efficient the write (although the benefit fades beyond a certain threshold); and the more concurrent connections, the higher the write throughput (although it drops again beyond a certain threshold, depending on the server's processing capacity).
To write data into TDengine efficiently, a client program must make full and proper use of these factors: write to only one table (or subtable) per request whenever possible; set the batch size, through testing and tuning, to the value best suited to the processing capacity of the current system; and likewise tune the number of concurrent connections, so that the best write speed on the current system is achieved. TDengine also provides parameter-binding writes, which is another way to help achieve efficient writing.
Besides the design of the client program, server-side configuration also matters for peak write throughput. If the CPU usage of the taosd process stays low no matter how the client program is tuned, the number of vgroups probably needs to be increased. For example, if a database contains 1000 tables in total and minTablesPerVnode is also set to 1000, the database will use at most one vgroup. If minTablesPerVnode and tableIncStepPerVnode are both set to 100 instead, the database may use up to 10 vgroups. For more performance-tuning parameters, see the performance tuning part of the [Configuration Reference](../../reference/config).
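For illustration only, these two parameters could be set in `taos.cfg` roughly as follows; the values here are assumptions matching the example above, not tuning recommendations, and taosd must be restarted for them to take effect:
```
# taos.cfg (illustrative values only)
minTablesPerVnode      100
tableIncStepPerVnode   100
```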
## Efficient Writing Design {#scenario}
The sample programs below demonstrate how to write data efficiently:
- The TDengine client program continuously reads data from a message queue or another data source; in the sample programs, generated mock data is used to simulate reading from a data source
- A single connection cannot write to TDengine as fast as the data is read, so the client program starts multiple threads, each of which establishes its own connection to TDengine and owns a dedicated, fixed-size message queue
- The client program hashes each received record by its table name (or subtable name) to a thread, i.e. puts it into that thread's message queue, so that data belonging to a given table (or subtable) is always handled by one fixed thread (see the sketch after the note below)
- Each worker thread writes the buffered batch to TDengine once it has drained its message queue, or once the amount of data read reaches a preset threshold, and then continues processing the data received afterwards
![TDengine efficient writing thread model](highvolume.webp)
:::note
In the architecture shown above, each write task is responsible only for specific tables, which follows the data adjacency principle. The tables read by the read tasks, however, are assumed to be random, so a single queue can have multiple threads (or processes) writing into it, and the queue may incur some locking overhead internally. In real scenarios, a one-to-one mapping between read tasks and write tasks is best if it can be achieved.
:::
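The table-name hashing described above can be illustrated with a minimal Java sketch; the `Dispatcher` class and its `dispatch` method are assumptions for this example, not part of the sample code:
```java
import java.util.List;
import java.util.concurrent.BlockingQueue;

class Dispatcher {
    // Route a raw line to the queue of one fixed write thread, keyed by table name,
    // so that all rows of a given table are always handled by the same writer.
    static void dispatch(String tableName, String line,
                         List<BlockingQueue<String>> queues) throws InterruptedException {
        int i = Math.floorMod(tableName.hashCode(), queues.size());
        queues.get(i).put(line); // blocks while the target queue is full
    }
}
```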
## Java Sample Program {#java-demo}
The Java sample program writes data by concatenating SQL statements, as sketched below.
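A concatenated statement batches rows for several subtables into one INSERT. The following is a minimal sketch; the subtable names and values are assumptions for illustration only:
```java
// Build one INSERT statement that carries batched rows for several subtables.
class InsertSqlSketch {
    static String buildSql() {
        StringBuilder sql = new StringBuilder("INSERT INTO ");
        // rows for subtable d_1 (illustrative names and values)
        sql.append("d_1 VALUES ('2022-07-13 10:00:00.000', 10.3, 219, 0.31) ")
           .append("('2022-07-13 10:00:00.100', 10.5, 218, 0.33) ");
        // rows for subtable d_2
        sql.append("d_2 VALUES ('2022-07-13 10:00:00.000', 9.8, 221, 0.28)");
        return sql.toString(); // executed once it approaches the SQL length limit
    }
}
```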
### Main Program {#java-demo-main}
The main program is responsible for:
1. Creating the message queues
2. Starting the write threads
3. Starting the read threads
4. Reporting the write speed every 10 seconds
The main program exposes 4 parameters by default, adjustable on every run, for testing and tuning:
1. The number of read threads. Default: 1.
2. The number of write threads. Default: 3.
3. The total number of simulated tables. Default: 1000. The tables are divided evenly among the read threads.
4. The maximum number of records written per batch. Default: 3000.
<details>
<summary>Main program</summary>
```java
{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/FastWriteExample.java:main}}
```
</details>
The queue capacity (taskQueueCapacity) is another performance-related parameter and can be adjusted by modifying the program. In general, the larger the queue capacity, the lower the probability that enqueueing blocks and the higher the queue throughput, but the larger the memory footprint.
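For example, a bounded queue with a given capacity could be created as follows; this is a minimal sketch, and the helper class and any capacity passed to it are illustrative assumptions:
```java
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

class QueueSetup {
    // A larger capacity lowers the chance that read threads block on put(),
    // at the cost of more heap being held by buffered lines.
    static BlockingQueue<String> newTaskQueue(int taskQueueCapacity) {
        return new ArrayBlockingQueue<>(taskQueueCapacity);
    }
}
```
A call such as `newTaskQueue(1_000_000)` would hold up to one million raw lines in memory, so the capacity should be sized against the available heap.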
### Read Task Implementation {#java-demo-read}
The read task is responsible for reading data from the data source. Each read task is associated with one mock data source, each mock data source generates data for a certain number of tables, and different mock data sources generate data for different tables.
The read task writes to the message queue in blocking mode; that is, once the queue is full, the put operation blocks until space becomes available.
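The contrast between the blocking producer side and the non-blocking consumer side used by WriteTask can be sketched as follows; the helper methods are illustrative assumptions around a shared `BlockingQueue<String>`:
```java
import java.util.concurrent.BlockingQueue;

class QueueUsage {
    // Read task side (producer): put() blocks while the queue is full,
    // which applies natural back-pressure to the data source.
    static void produce(BlockingQueue<String> queue, String line) throws InterruptedException {
        queue.put(line);
    }

    // Write task side (consumer): poll() returns null immediately when the queue
    // is empty, so the writer can flush its buffered rows instead of waiting.
    static String consume(BlockingQueue<String> queue) {
        return queue.poll();
    }
}
```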
<details>
<summary>Read task implementation</summary>
```java
{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/ReadTask.java:ReadTask}}
```
</details>
### Write Task Implementation {#java-demo-write}
<details>
<summary>Write task implementation</summary>
```java
{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/WriteTask.java:WriteTask}}
```
</details>
### SQLWriter Class Implementation {#java-demo-sql-writer}
The SQLWriter class encapsulates the logic of assembling SQL and writing data. Note that none of the tables are created in advance; instead, when a write fails because tables are missing, the tables are created in a batch using the supertable as the template, and the INSERT statement is then re-executed.
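This error-handling pattern can be sketched as follows; it is a simplified illustration rather than the actual SQLWriter source, and the class and helper names are assumptions:
```java
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;

class RetryingWriter {
    // Execute the batched INSERT; if it fails (for example because the subtables
    // do not exist yet), create them from the supertable template and retry once.
    static void writeBatch(Connection conn, String insertSql, String createTablesSql) throws SQLException {
        try (Statement stmt = conn.createStatement()) {
            stmt.executeUpdate(insertSql);
        } catch (SQLException e) {
            try (Statement stmt = conn.createStatement()) {
                stmt.executeUpdate(createTablesSql);
                stmt.executeUpdate(insertSql);
            }
        }
    }
}
```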
<details>
<summary>SQLWriter class implementation</summary>
```java
{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java:SQLWriter}}
```
</details>
### Running the Sample Program {#run-java-demo}
<details>
<summary>Run the Java sample program</summary>
Before running the program, configure the environment variable `TDENGINE_JDBC_URL`. If TDengine Server is deployed locally with the default username, password, and port, the value can be:
```
TDENGINE_JDBC_URL="jdbc:TAOS://localhost:6030?user=root&password=taosdata"
```
#### Running the Sample Program in a Local IDE {#java-demo-local-run}
1. Clone the TDengine repository
```
git clone git@github.com:taosdata/TDengine.git --depth 1
```
2. Open the directory `docs/examples/java` in your IDE.
3. Configure the environment variable `TDENGINE_JDBC_URL` in the IDE. If a global `TDENGINE_JDBC_URL` has already been configured, this step can be skipped.
4. Run the class `com.taos.example.highvolume.FastWriteExample`.
#### Running the Sample Program on a Remote Server {#java-demo-remote-run}
To run the sample program on a server, follow these steps:
1. Package the sample code. Under the directory TDengine/docs/examples/java, run:
```
mvn package
```
2. Create an examples directory on the remote server:
```
mkdir -p examples/java
```
3. Copy the dependencies to the specified directory on the server:
- Copy the dependency packages (this is only needed once)
```
scp -r .\target\lib <user>@<host>:~/examples/java
```
- Copy the jar package of this program (needed every time the code is updated)
```
scp -r .\target\javaexample-1.0.jar <user>@<host>:~/examples/java
```
4. Configure the environment variable.
Edit `~/.bash_profile` or `~/.bashrc` and add content such as:
```
export TDENGINE_JDBC_URL="jdbc:TAOS://localhost:6030?user=root&password=taosdata"
```
The above uses the default JDBC URL for a TDengine Server deployed locally. Change it according to your actual environment.
5. Start the sample program with the java command; the command template is:
```
java -classpath lib/*:javaexample-1.0.jar com.taos.example.highvolume.FastWriteExample <read_thread_count> <write_thread_count> <total_table_count> <max_batch_size>
```
6. Stop the test program. It does not stop on its own; once a stable write speed has been observed for the current configuration, press <kbd>CTRL</kbd> + <kbd>C</kbd> to terminate it.
The following is the output of an actual run:
```
[testuser@vm95 java]$ java -classpath lib/*:javaexample-1.0.jar com.taos.example.highvolume.FastWriteExample 1 9 1000 2000
17:01:01.131 [main] INFO c.t.e.highvolume.FastWriteExample - readTaskCount=1, writeTaskCount=9 tableCount=1000 maxBatchSize=2000
17:01:01.286 [WriteThread-0] INFO c.taos.example.highvolume.WriteTask - started
17:01:01.354 [WriteThread-1] INFO c.taos.example.highvolume.WriteTask - started
17:01:01.360 [WriteThread-2] INFO c.taos.example.highvolume.WriteTask - started
17:01:01.366 [WriteThread-3] INFO c.taos.example.highvolume.WriteTask - started
17:01:01.433 [WriteThread-4] INFO c.taos.example.highvolume.WriteTask - started
17:01:01.438 [WriteThread-5] INFO c.taos.example.highvolume.WriteTask - started
17:01:01.443 [WriteThread-6] INFO c.taos.example.highvolume.WriteTask - started
17:01:01.448 [WriteThread-7] INFO c.taos.example.highvolume.WriteTask - started
17:01:01.454 [WriteThread-8] INFO c.taos.example.highvolume.WriteTask - started
17:01:01.454 [ReadThread-0] INFO com.taos.example.highvolume.ReadTask - started
17:01:11.615 [main] INFO c.t.e.highvolume.FastWriteExample - count=18766442 speed=1876644
17:01:21.775 [main] INFO c.t.e.highvolume.FastWriteExample - count=38947464 speed=2018102
17:01:32.428 [main] INFO c.t.e.highvolume.FastWriteExample - count=58649571 speed=1970210
17:01:42.577 [main] INFO c.t.e.highvolume.FastWriteExample - count=79264890 speed=2061531
17:01:53.265 [main] INFO c.t.e.highvolume.FastWriteExample - count=99097476 speed=1983258
17:02:04.209 [main] INFO c.t.e.highvolume.FastWriteExample - count=119546779 speed=2044930
17:02:14.935 [main] INFO c.t.e.highvolume.FastWriteExample - count=141078914 speed=2153213
17:02:25.617 [main] INFO c.t.e.highvolume.FastWriteExample - count=162183457 speed=2110454
17:02:36.718 [main] INFO c.t.e.highvolume.FastWriteExample - count=182735614 speed=2055215
17:02:46.988 [main] INFO c.t.e.highvolume.FastWriteExample - count=202895614 speed=2016000
```
</details>
## Python Sample Program {#python-demo}
The Python sample program uses a multi-process architecture and cross-process queues for communication. The write tasks write data by assembling SQL statements.
### main Function {#python-demo-main}
The main function creates the message queues and starts the child processes, of which there are 3 kinds:
1. 1 monitor process, responsible for initializing the database and reporting the write speed
2. n read processes, responsible for reading data from other data systems
3. m write processes, responsible for writing to the database
The main function accepts 5 startup parameters, in order:
1. The number of read tasks (processes). Default: 1
2. The number of write tasks (processes). Default: 1
3. The total number of simulated tables. Default: 1000
4. The queue size in bytes. Default: 1000000
5. The maximum number of records written per batch. Default: 3000
<details>
<summary>main function</summary>
```python
{{#include docs/examples/python/highvolume_faster_queue.py:main}}
```
</details>
### Monitor Process
The monitor process initializes the database and monitors the current write speed.
<details>
<summary>Monitor Process</summary>
```python
{{#include docs/examples/python/highvolume_faster_queue.py:monitor}}
```
</details>
### Read Process {#python-read-process}
#### Main Logic of the Read Process {#python-run-read-task}
The read process reads data from other data systems and distributes it to the write processes.
<details>
<summary>run_read_task function</summary>
```python
{{#include docs/examples/python/highvolume_faster_queue.py:read}}
```
</details>
#### Mock Data Source {#python-mock-data-source}
The following is the implementation of the mock data source. It assumes that every record produced by the data source carries its target table name; in practice, you may need some rule to determine the target table name.
<details>
<summary>MockDataSource</summary>
```python
{{#include docs/examples/python/highvolume_faster_queue.py:MockDataSource}}
```
</details>
### Write Process {#python-write-process}
Each write process takes as much data as possible from its queue at a time and writes it in one batch.
<details>
<summary>run_write_task function</summary>
```python
{{#include docs/examples/python/highvolume_faster_queue.py:write}}
```
</details>
### SQLWriter Class Implementation {#python-sql-writer}
The SQLWriter class encapsulates the logic of assembling SQL and writing data. None of the tables are created in advance; instead, when an insert fails because a table does not exist, the tables are created in a batch using the supertable as the template, and the INSERT statement is then re-executed. This class also checks whether the SQL approaches the maximum length limit (maxSQLLength); when it does, the SQL is executed immediately. To reduce the number of SQL executions, consider increasing maxSQLLength appropriately.
<details>
<summary>SQLWriter</summary>
```python
{{#include docs/examples/python/sql_writer.py}}
```
</details>
### Running the Sample Program {#run-python-demo}
<details>
<summary>Run the Python sample program</summary>
1. Prerequisites
- TDengine client driver installed
- Python 3 installed (version >= 3.8 recommended)
- taospy installed
2. Install faster-fifo to replace Python's built-in multiprocessing.Queue
```
pip3 install faster-fifo
```
3. Use the "view source" links above to copy the two files `highvolume_faster_queue.py` and `sql_writer.py`.
4. Run the sample program
```
python3 highvolume_faster_queue.py <READ_TASK_COUNT> <WRITE_TASK_COUNT> <TABLE_COUNT> <QUEUE_SIZE> <MAX_BATCH_SIZE>
```
The following is the output of an actual run:
```
[testuser@vm95 python]$ python3.6 highvolume_faster_queue.py 9 9 1000 5000000 3000
2022-07-13 10:05:50,504 [root] - READ_TASK_COUNT=9, WRITE_TASK_COUNT=9, TABLE_COUNT=1000, QUEUE_SIZE=5000000, MAX_BATCH_SIZE=3000
2022-07-13 10:05:53,542 [root] - WriteTask-0 started with pid 5475
2022-07-13 10:05:53,542 [root] - WriteTask-1 started with pid 5476
2022-07-13 10:05:53,543 [root] - WriteTask-2 started with pid 5477
2022-07-13 10:05:53,543 [root] - WriteTask-3 started with pid 5478
2022-07-13 10:05:53,544 [root] - WriteTask-4 started with pid 5479
2022-07-13 10:05:53,544 [root] - WriteTask-5 started with pid 5480
2022-07-13 10:05:53,545 [root] - WriteTask-6 started with pid 5481
2022-07-13 10:05:53,546 [root] - WriteTask-7 started with pid 5482
2022-07-13 10:05:53,546 [root] - WriteTask-8 started with pid 5483
2022-07-13 10:05:53,547 [root] - ReadTask-0 started with pid 5484
2022-07-13 10:05:53,548 [root] - ReadTask-1 started with pid 5485
2022-07-13 10:05:53,549 [root] - ReadTask-2 started with pid 5486
2022-07-13 10:05:53,550 [root] - ReadTask-3 started with pid 5487
2022-07-13 10:05:53,551 [root] - ReadTask-4 started with pid 5488
2022-07-13 10:05:53,552 [root] - ReadTask-5 started with pid 5489
2022-07-13 10:05:53,552 [root] - ReadTask-6 started with pid 5490
2022-07-13 10:05:53,553 [root] - ReadTask-7 started with pid 5491
2022-07-13 10:05:53,554 [root] - ReadTask-8 started with pid 5492
2022-07-13 10:06:00,842 [DataBaseMonitor] - count=6612939 speed=661293.9
2022-07-13 10:06:11,151 [DataBaseMonitor] - count=14765739 speed=815280.0
2022-07-13 10:06:21,677 [DataBaseMonitor] - count=23282163 speed=851642.4
2022-07-13 10:06:31,985 [DataBaseMonitor] - count=31673139 speed=839097.6
2022-07-13 10:06:42,343 [DataBaseMonitor] - count=39819439 speed=814630.0
2022-07-13 10:06:52,830 [DataBaseMonitor] - count=48146339 speed=832690.0
2022-07-13 10:07:03,396 [DataBaseMonitor] - count=56385039 speed=823870.0
2022-07-13 10:07:14,341 [DataBaseMonitor] - count=64848739 speed=846370.0
2022-07-13 10:07:24,877 [DataBaseMonitor] - count=73654566 speed=880582.7
```
</details>
...@@ -32,7 +32,6 @@ CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [DAYS days] [UPDATE 1]; ...@@ -32,7 +32,6 @@ CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [DAYS days] [UPDATE 1];
- cacheLast: [详细说明](/reference/config/#cachelast) - cacheLast: [详细说明](/reference/config/#cachelast)
- replica: [详细说明](/reference/config/#replica) - replica: [详细说明](/reference/config/#replica)
- quorum: [详细说明](/reference/config/#quorum) - quorum: [详细说明](/reference/config/#quorum)
- maxVgroupsPerDb: [详细说明](/reference/config/#maxvgroupsperdb)
- comp: [详细说明](/reference/config/#comp) - comp: [详细说明](/reference/config/#comp)
- precision: [详细说明](/reference/config/#precision) - precision: [详细说明](/reference/config/#precision)
6. 请注意上面列出的所有参数都可以配置在配置文件 `taosd.cfg` 中作为创建数据库时使用的默认配置, `create database` 的参数中明确指定的会覆盖配置文件中的设置。 6. 请注意上面列出的所有参数都可以配置在配置文件 `taosd.cfg` 中作为创建数据库时使用的默认配置, `create database` 的参数中明确指定的会覆盖配置文件中的设置。
......
...@@ -5,15 +5,15 @@ sidebar_label: Go ...@@ -5,15 +5,15 @@ sidebar_label: Go
title: TDengine Go Connector title: TDengine Go Connector
--- ---
import Tabs from '@theme/Tabs'; import Tabs from "@theme/Tabs";
import TabItem from '@theme/TabItem'; import TabItem from "@theme/TabItem";
import Preparition from "./_preparition.mdx" import Preparition from "./_preparition.mdx";
import GoInsert from "../../07-develop/03-insert-data/_go_sql.mdx" import GoInsert from "../../07-develop/03-insert-data/_go_sql.mdx";
import GoInfluxLine from "../../07-develop/03-insert-data/_go_line.mdx" import GoInfluxLine from "../../07-develop/03-insert-data/_go_line.mdx";
import GoOpenTSDBTelnet from "../../07-develop/03-insert-data/_go_opts_telnet.mdx" import GoOpenTSDBTelnet from "../../07-develop/03-insert-data/_go_opts_telnet.mdx";
import GoOpenTSDBJson from "../../07-develop/03-insert-data/_go_opts_json.mdx" import GoOpenTSDBJson from "../../07-develop/03-insert-data/_go_opts_json.mdx";
import GoQuery from "../../07-develop/04-query-data/_go.mdx" import GoQuery from "../../07-develop/04-query-data/_go.mdx";
`driver-go` TDengine 的官方 Go 语言连接器,实现了 Go 语言[ database/sql ](https://golang.org/pkg/database/sql/) 包的接口。Go 开发人员可以通过它开发存取 TDengine 集群数据的应用软件。 `driver-go` TDengine 的官方 Go 语言连接器,实现了 Go 语言[ database/sql ](https://golang.org/pkg/database/sql/) 包的接口。Go 开发人员可以通过它开发存取 TDengine 集群数据的应用软件。
...@@ -38,64 +38,64 @@ REST 连接支持所有能运行 Go 的平台。 ...@@ -38,64 +38,64 @@ REST 连接支持所有能运行 Go 的平台。
“原生连接”指连接器通过 TDengine 客户端驱动(taosc)直接与 TDengine 运行实例建立的连接。支持的功能特性有: “原生连接”指连接器通过 TDengine 客户端驱动(taosc)直接与 TDengine 运行实例建立的连接。支持的功能特性有:
* 普通查询 - 普通查询
* 连续查询 - 连续查询
* 订阅 - 订阅
* schemaless 接口 - schemaless 接口
* 参数绑定接口 - 参数绑定接口
### REST 连接 ### REST 连接
"REST 连接"指连接器通过 taosAdapter 组件提供的 REST API TDengine 运行实例建立的连接。支持的功能特性有: "REST 连接"指连接器通过 taosAdapter 组件提供的 REST API TDengine 运行实例建立的连接。支持的功能特性有:
* 普通查询 - 普通查询
* 连续查询 - 连续查询
## 安装步骤 ## 安装步骤
### 安装前准备 ### 安装前准备
* 安装 Go 开发环境(Go 1.14 及以上,GCC 4.8.5 及以上) - 安装 Go 开发环境(Go 1.14 及以上,GCC 4.8.5 及以上)
* 如果使用原生连接器,请安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](/reference/connector#安装客户端驱动) - 如果使用原生连接器,请安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](/reference/connector#安装客户端驱动)
配置好环境变量,检查命令: 配置好环境变量,检查命令:
* ```go env``` - `go env`
* ```gcc -v``` - `gcc -v`
### 使用 go get 安装 ### 使用 go get 安装
`go get -u github.com/taosdata/driver-go/v2@develop` `go get -u github.com/taosdata/driver-go/v2@latest`
### 使用 go mod 管理 ### 使用 go mod 管理
1. 使用 `go mod` 命令初始化项目: 1. 使用 `go mod` 命令初始化项目:
```text ```text
go mod init taos-demo go mod init taos-demo
``` ```
2. 引入 taosSql 2. 引入 taosSql
```go ```go
import ( import (
"database/sql" "database/sql"
_ "github.com/taosdata/driver-go/v2/taosSql" _ "github.com/taosdata/driver-go/v2/taosSql"
) )
``` ```
3. 使用 `go mod tidy` 更新依赖包: 3. 使用 `go mod tidy` 更新依赖包:
```text ```text
go mod tidy go mod tidy
``` ```
4. 使用 `go run taos-demo` 运行程序或使用 `go build` 命令编译出二进制文件。 4. 使用 `go run taos-demo` 运行程序或使用 `go build` 命令编译出二进制文件。
```text ```text
go run taos-demo go run taos-demo
go build go build
``` ```
## 建立连接 ## 建立连接
...@@ -103,7 +103,7 @@ REST 连接支持所有能运行 Go 的平台。 ...@@ -103,7 +103,7 @@ REST 连接支持所有能运行 Go 的平台。
数据源名称具有通用格式,例如 [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php),但没有类型前缀(方括号表示可选): 数据源名称具有通用格式,例如 [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php),但没有类型前缀(方括号表示可选):
``` text ```text
[username[:password]@][protocol[(address)]]/[dbname][?param1=value1&...&paramN=valueN] [username[:password]@][protocol[(address)]]/[dbname][?param1=value1&...&paramN=valueN]
``` ```
...@@ -112,6 +112,7 @@ REST 连接支持所有能运行 Go 的平台。 ...@@ -112,6 +112,7 @@ REST 连接支持所有能运行 Go 的平台。
```text ```text
username:password@protocol(address)/dbname?param=value username:password@protocol(address)/dbname?param=value
``` ```
### 使用连接器进行连接 ### 使用连接器进行连接
<Tabs defaultValue="native"> <Tabs defaultValue="native">
...@@ -121,7 +122,7 @@ _taosSql_ 通过 cgo 实现了 Go 的 `database/sql/driver` 接口。只需要 ...@@ -121,7 +122,7 @@ _taosSql_ 通过 cgo 实现了 Go 的 `database/sql/driver` 接口。只需要
使用 `taosSql` 作为 `driverName` 并且使用一个正确的 [DSN](#DSN) 作为 `dataSourceName`DSN 支持的参数: 使用 `taosSql` 作为 `driverName` 并且使用一个正确的 [DSN](#DSN) 作为 `dataSourceName`DSN 支持的参数:
* configPath 指定 taos.cfg 目录 - configPath 指定 taos.cfg 目录
示例: 示例:
...@@ -152,8 +153,8 @@ _taosRestful_ 通过 `http client` 实现了 Go 的 `database/sql/driver` 接口 ...@@ -152,8 +153,8 @@ _taosRestful_ 通过 `http client` 实现了 Go 的 `database/sql/driver` 接口
使用 `taosRestful` 作为 `driverName` 并且使用一个正确的 [DSN](#DSN) 作为 `dataSourceName`DSN 支持的参数: 使用 `taosRestful` 作为 `driverName` 并且使用一个正确的 [DSN](#DSN) 作为 `dataSourceName`DSN 支持的参数:
* `disableCompression` 是否接受压缩数据,默认为 true 不接受压缩数据,如果传输数据使用 gzip 压缩设置为 false - `disableCompression` 是否接受压缩数据,默认为 true 不接受压缩数据,如果传输数据使用 gzip 压缩设置为 false
* `readBufferSize` 读取数据的缓存区大小默认为 4K4096),当查询结果数据量多时可以适当调大该值。 - `readBufferSize` 读取数据的缓存区大小默认为 4K4096),当查询结果数据量多时可以适当调大该值。
示例: 示例:
...@@ -176,6 +177,7 @@ func main() { ...@@ -176,6 +177,7 @@ func main() {
} }
} }
``` ```
</TabItem> </TabItem>
</Tabs> </Tabs>
...@@ -205,8 +207,8 @@ func main() { ...@@ -205,8 +207,8 @@ func main() {
### 更多示例程序 ### 更多示例程序
* [示例程序](https://github.com/taosdata/TDengine/tree/develop/examples/go) - [示例程序](https://github.com/taosdata/TDengine/tree/develop/examples/go)
* [视频教程](https://www.taosdata.com/blog/2020/11/11/1951.html) - [视频教程](https://www.taosdata.com/blog/2020/11/11/1951.html)
## 使用限制 ## 使用限制
...@@ -266,55 +268,51 @@ func main() { ...@@ -266,55 +268,51 @@ func main() {
## 常见问题 ## 常见问题
1. 无法找到包 `github.com/taosdata/driver-go/v2/taosRestful` 1. database/sql stmt(参数绑定)相关接口崩溃
`go.mod` require 块对`github.com/taosdata/driver-go/v2`的引用改为`github.com/taosdata/driver-go/v2 develop`,之后执行 `go mod tidy`
2. database/sql stmt(参数绑定)相关接口崩溃
REST 不支持参数绑定相关接口,建议使用`db.Exec``db.Query` REST 不支持参数绑定相关接口,建议使用`db.Exec``db.Query`
3. 使用 `use db` 语句后执行其他语句报错 `[0x217] Database not specified or available` 2. 使用 `use db` 语句后执行其他语句报错 `[0x217] Database not specified or available`
REST 接口中 SQL 语句的执行无上下文关联,使用 `use db` 语句不会生效,解决办法见上方使用限制章节。 REST 接口中 SQL 语句的执行无上下文关联,使用 `use db` 语句不会生效,解决办法见上方使用限制章节。
4. 使用 taosSql 不报错使用 taosRestful 报错 `[0x217] Database not specified or available` 3. 使用 taosSql 不报错使用 taosRestful 报错 `[0x217] Database not specified or available`
因为 REST 接口无状态,使用 `use db` 语句不会生效,解决办法见上方使用限制章节。 因为 REST 接口无状态,使用 `use db` 语句不会生效,解决办法见上方使用限制章节。
5. 升级 `github.com/taosdata/driver-go/v2/taosRestful` 4. 升级 `github.com/taosdata/driver-go/v2/taosRestful`
`go.mod` 文件中对 `github.com/taosdata/driver-go/v2` 的引用改为 `github.com/taosdata/driver-go/v2 develop`,之后执行 `go mod tidy` `go.mod` 文件中对 `github.com/taosdata/driver-go/v2` 的引用改为 `github.com/taosdata/driver-go/v2 develop`,之后执行 `go mod tidy`
6. `readBufferSize` 参数调大后无明显效果 5. `readBufferSize` 参数调大后无明显效果
`readBufferSize` 调大后会减少获取结果时 `syscall` 的调用。如果查询结果的数据量不大,修改该参数不会带来明显提升,如果该参数修改过大,瓶颈会在解析 JSON 数据。如果需要优化查询速度,需要根据实际情况调整该值来达到查询效果最优。 `readBufferSize` 调大后会减少获取结果时 `syscall` 的调用。如果查询结果的数据量不大,修改该参数不会带来明显提升,如果该参数修改过大,瓶颈会在解析 JSON 数据。如果需要优化查询速度,需要根据实际情况调整该值来达到查询效果最优。
7. `disableCompression` 参数设置为 `false` 时查询效率降低 6. `disableCompression` 参数设置为 `false` 时查询效率降低
`disableCompression` 参数设置为 `false` 时查询结果会使用 `gzip` 压缩后传输,拿到数据后要先进行 `gzip` 解压。 `disableCompression` 参数设置为 `false` 时查询结果会使用 `gzip` 压缩后传输,拿到数据后要先进行 `gzip` 解压。
8. `go get` 命令无法获取包,或者获取包超时 7. `go get` 命令无法获取包,或者获取包超时
设置 Go 代理 `go env -w GOPROXY=https://goproxy.cn,direct` 设置 Go 代理 `go env -w GOPROXY=https://goproxy.cn,direct`
## 常用 API ## 常用 API
### database/sql API ### database/sql API
* `sql.Open(DRIVER_NAME string, dataSourceName string) *DB` - `sql.Open(DRIVER_NAME string, dataSourceName string) *DB`
API 用来打开 DB,返回一个类型为 \*DB 的对象。 API 用来打开 DB,返回一个类型为 \*DB 的对象。
:::info :::info
API 成功创建的时候,并没有做权限等检查,只有在真正执行 Query 或者 Exec 的时候才能真正的去创建连接,并同时检查 user/password/host/port 是不是合法。 API 成功创建的时候,并没有做权限等检查,只有在真正执行 Query 或者 Exec 的时候才能真正的去创建连接,并同时检查 user/password/host/port 是不是合法。
::: :::
* `func (db *DB) Exec(query string, args ...interface{}) (Result, error)` - `func (db *DB) Exec(query string, args ...interface{}) (Result, error)`
`sql.Open` 内置的方法,用来执行非查询相关 SQL `sql.Open` 内置的方法,用来执行非查询相关 SQL
* `func (db *DB) Query(query string, args ...interface{}) (*Rows, error)` - `func (db *DB) Query(query string, args ...interface{}) (*Rows, error)`
`sql.Open` 内置的方法,用来执行查询语句。 `sql.Open` 内置的方法,用来执行查询语句。
...@@ -324,85 +322,85 @@ func main() { ...@@ -324,85 +322,85 @@ func main() {
#### 连接管理 #### 连接管理
* `af.Open(host, user, pass, db string, port int) (*Connector, error)` - `af.Open(host, user, pass, db string, port int) (*Connector, error)`
API 通过 cgo 创建与 taosd 的连接。 API 通过 cgo 创建与 taosd 的连接。
* `func (conn *Connector) Close() error` - `func (conn *Connector) Close() error`
关闭与 taosd 的连接。 关闭与 taosd 的连接。
#### 订阅 #### 订阅
* `func (conn *Connector) Subscribe(restart bool, topic string, sql string, interval time.Duration) (Subscriber, error)` - `func (conn *Connector) Subscribe(restart bool, topic string, sql string, interval time.Duration) (Subscriber, error)`
订阅数据。 订阅数据。
* `func (s *taosSubscriber) Consume() (driver.Rows, error)` - `func (s *taosSubscriber) Consume() (driver.Rows, error)`
消费订阅数据,返回 `database/sql/driver` 包的 `Rows` 结构。 消费订阅数据,返回 `database/sql/driver` 包的 `Rows` 结构。
* `func (s *taosSubscriber) Unsubscribe(keepProgress bool)` - `func (s *taosSubscriber) Unsubscribe(keepProgress bool)`
取消订阅数据。 取消订阅数据。
#### schemaless #### schemaless
* `func (conn *Connector) InfluxDBInsertLines(lines []string, precision string) error` - `func (conn *Connector) InfluxDBInsertLines(lines []string, precision string) error`
写入 influxDB 行协议。 写入 influxDB 行协议。
* `func (conn *Connector) OpenTSDBInsertTelnetLines(lines []string) error` - `func (conn *Connector) OpenTSDBInsertTelnetLines(lines []string) error`
写入 OpenTDSB telnet 协议数据。 写入 OpenTDSB telnet 协议数据。
* `func (conn *Connector) OpenTSDBInsertJsonPayload(payload string) error` - `func (conn *Connector) OpenTSDBInsertJsonPayload(payload string) error`
写入 OpenTSDB JSON 协议数据。 写入 OpenTSDB JSON 协议数据。
#### 参数绑定 #### 参数绑定
* `func (conn *Connector) StmtExecute(sql string, params *param.Param) (res driver.Result, err error)` - `func (conn *Connector) StmtExecute(sql string, params *param.Param) (res driver.Result, err error)`
参数绑定单行插入。 参数绑定单行插入。
* `func (conn *Connector) StmtQuery(sql string, params *param.Param) (rows driver.Rows, err error)` - `func (conn *Connector) StmtQuery(sql string, params *param.Param) (rows driver.Rows, err error)`
参数绑定查询,返回 `database/sql/driver` 包的 `Rows` 结构。 参数绑定查询,返回 `database/sql/driver` 包的 `Rows` 结构。
* `func (conn *Connector) InsertStmt() *insertstmt.InsertStmt` - `func (conn *Connector) InsertStmt() *insertstmt.InsertStmt`
初始化参数。 初始化参数。
* `func (stmt *InsertStmt) Prepare(sql string) error` - `func (stmt *InsertStmt) Prepare(sql string) error`
参数绑定预处理 SQL 语句。 参数绑定预处理 SQL 语句。
* `func (stmt *InsertStmt) SetTableName(name string) error` - `func (stmt *InsertStmt) SetTableName(name string) error`
参数绑定设置表名。 参数绑定设置表名。
* `func (stmt *InsertStmt) SetSubTableName(name string) error` - `func (stmt *InsertStmt) SetSubTableName(name string) error`
参数绑定设置子表名。 参数绑定设置子表名。
* `func (stmt *InsertStmt) BindParam(params []*param.Param, bindType *param.ColumnType) error` - `func (stmt *InsertStmt) BindParam(params []*param.Param, bindType *param.ColumnType) error`
参数绑定多行数据。 参数绑定多行数据。
* `func (stmt *InsertStmt) AddBatch() error` - `func (stmt *InsertStmt) AddBatch() error`
添加到参数绑定批处理。 添加到参数绑定批处理。
* `func (stmt *InsertStmt) Execute() error` - `func (stmt *InsertStmt) Execute() error`
执行参数绑定。 执行参数绑定。
* `func (stmt *InsertStmt) GetAffectedRows() int` - `func (stmt *InsertStmt) GetAffectedRows() int`
获取参数绑定插入受影响行数。 获取参数绑定插入受影响行数。
* `func (stmt *InsertStmt) Close() error` - `func (stmt *InsertStmt) Close() error`
结束参数绑定。 结束参数绑定。
......
...@@ -368,7 +368,7 @@ public class ParameterBindingDemo { ...@@ -368,7 +368,7 @@ public class ParameterBindingDemo {
private static final String host = "127.0.0.1"; private static final String host = "127.0.0.1";
private static final Random random = new Random(System.currentTimeMillis()); private static final Random random = new Random(System.currentTimeMillis());
private static final int BINARY_COLUMN_SIZE = 20; private static final int BINARY_COLUMN_SIZE = 30;
private static final String[] schemaList = { private static final String[] schemaList = {
"create table stable1(ts timestamp, f1 tinyint, f2 smallint, f3 int, f4 bigint) tags(t1 tinyint, t2 smallint, t3 int, t4 bigint)", "create table stable1(ts timestamp, f1 tinyint, f2 smallint, f3 int, f4 bigint) tags(t1 tinyint, t2 smallint, t3 int, t4 bigint)",
"create table stable2(ts timestamp, f1 float, f2 double) tags(t1 float, t2 double)", "create table stable2(ts timestamp, f1 float, f2 double) tags(t1 float, t2 double)",
......
...@@ -367,10 +367,10 @@ WHERE ts>=1510560000 AND ts<=1515000009 ...@@ -367,10 +367,10 @@ WHERE ts>=1510560000 AND ts<=1515000009
### 存储资源估算 ### 存储资源估算
假设产生数据并需要存储的传感器设备数量为 `n`,数据生成的频率为`t`条/秒,每条记录的长度为 `L` bytes,则每天产生的数据规模为 `n×t×L` bytes。假设压缩比为 C,则每日产生数据规模为 `(n×t×L)/C` bytes。存储资源预估为能够容纳 1.5 年的数据规模,生产环境下 TDengine 的压缩比 C 一般在 5 ~ 7 之间,同时为最后结果增加 20% 的冗余,可计算得到需要存储资源: 假设产生数据并需要存储的传感器设备数量为 `n`,数据生成的频率为`t`条/秒,每条记录的长度为 `L` bytes,则每天产生的数据规模为 `86400×n×t×L` bytes。假设压缩比为 C,则每日产生数据规模为 `(86400×n×t×L)/C` bytes。存储资源预估为能够容纳 1.5 年的数据规模,生产环境下 TDengine 的压缩比 C 一般在 5 ~ 7 之间,同时为最后结果增加 20% 的冗余,可计算得到需要存储资源:
```matlab ```matlab
(n×t×L)×(365×1.5)×(1+20%)/C (86400×n×t×L)×(365×1.5)×(1+20%)/C
``` ```
结合以上的计算公式,将参数带入计算公式,在不考虑标签信息的情况下,每年产生的原始数据规模是 11.8TB。需要注意的是,由于标签信息在 TDengine 中关联到每个时间线,并不是每条记录。所以需要记录的数据量规模相对于产生的数据有一定的降低,而这部分标签数据整体上可以忽略不记。假设压缩比为 5,则保留的数据规模最终为 2.56 TB。 结合以上的计算公式,将参数带入计算公式,在不考虑标签信息的情况下,每年产生的原始数据规模是 11.8TB。需要注意的是,由于标签信息在 TDengine 中关联到每个时间线,并不是每条记录。所以需要记录的数据量规模相对于产生的数据有一定的降低,而这部分标签数据整体上可以忽略不记。假设压缩比为 5,则保留的数据规模最终为 2.56 TB。
......
...@@ -389,7 +389,7 @@ namespace TDengineDriver ...@@ -389,7 +389,7 @@ namespace TDengineDriver
static void ExitProgram() static void ExitProgram()
{ {
System.Environment.Exit(0); System.Environment.Exit(1);
} }
public void cleanup() public void cleanup()
......
using System;
using TDengineDriver;
using System.Runtime.InteropServices;
using System.Text;
using System.Collections.Generic;
namespace Test.UtilsTools.ResultSet
{
public class ResultSet
{
private List<TDengineMeta> resultMeta;
private List<Object> resultData;
// private bool isValidResult = false;
public ResultSet(IntPtr res)
{
resultMeta = UtilsTools.GetResField(res);
resultData = UtilsTools.GetResData(res);
}
public ResultSet(List<TDengineMeta> metas, List<Object> datas)
{
resultMeta = metas;
resultData = datas;
}
public List<Object> GetResultData()
{
return resultData;
}
public List<TDengineMeta> GetResultMeta()
{
return resultMeta;
}
public int GetFieldsNum()
{
return resultMeta.Count;
}
}
}
using System;
using TDengineDriver;
using System.Runtime.InteropServices;
using System.Text;
using System.Collections.Generic;
namespace Test.UtilsTools
{
public class UtilsTools
{
static string ip = "127.0.0.1";
static string user = "root";
static string password = "taosdata";
static string db = "";
static short port = 0;
//get a tdengine connection
public static IntPtr TDConnection()
{
TDengine.Options((int)TDengineInitOption.TDDB_OPTION_CONFIGDIR, GetConfigPath());
TDengine.Options((int)TDengineInitOption.TDDB_OPTION_SHELL_ACTIVITY_TIMER, "60");
TDengine.Init();
IntPtr conn = TDengine.Connect(ip, user, password, db, port);
return conn;
}
        // get the taos.cfg directory based on the operating system
public static string GetConfigPath()
{
string configDir = "" ;
if(OperatingSystem.IsOSPlatform("Windows"))
{
configDir = "C:/TDengine/cfg";
}
else if(OperatingSystem.IsOSPlatform("Linux"))
{
configDir = "/etc/taos";
}
else if(OperatingSystem.IsOSPlatform("macOS"))
{
configDir = "/etc/taos";
}
return configDir;
}
public static IntPtr ExecuteQuery(IntPtr conn, String sql)
{
IntPtr res = TDengine.Query(conn, sql);
if (!IsValidResult(res))
{
Console.Write(sql.ToString() + " failure, ");
ExitProgram();
}
else
{
Console.WriteLine(sql.ToString() + " success");
}
return res;
}
        public static IntPtr ExecuteErrorQuery(IntPtr conn, String sql)
        {
            // This helper runs a statement that is expected to fail,
            // so an invalid result is only reported instead of exiting the program.
            IntPtr res = TDengine.Query(conn, sql);
            if (!IsValidResult(res))
            {
                Console.Write(sql.ToString() + " failure, ");
            }
            else
            {
                Console.WriteLine(sql.ToString() + " success");
            }
            return res;
        }
public static void ExecuteUpdate(IntPtr conn, String sql)
{
IntPtr res = TDengine.Query(conn, sql);
if (!IsValidResult(res))
{
Console.Write(sql.ToString() + " failure, ");
ExitProgram();
}
else
{
Console.WriteLine(sql.ToString() + " success");
}
TDengine.FreeResult(res);
}
public static bool IsValidResult(IntPtr res)
{
if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0))
{
if (res != IntPtr.Zero)
{
Console.Write("reason: " + TDengine.Error(res));
return false;
}
Console.WriteLine("");
return false;
}
return true;
}
public static void CloseConnection(IntPtr conn)
{
if (conn != IntPtr.Zero)
{
if (TDengine.Close(conn) == 0)
{
Console.WriteLine("close connection sucess");
}
else
{
Console.WriteLine("close Connection failed");
}
}
TDengine.Cleanup();
}
public static List<TDengineMeta> GetResField(IntPtr res)
{
List<TDengineMeta> metas = TDengine.FetchFields(res);
return metas;
}
public static void AssertEqual(string expectVal, string actualVal)
{
if (expectVal == actualVal)
{
Console.WriteLine("{0}=={1} pass", expectVal, actualVal);
}
else
{
Console.WriteLine("{0}=={1} failed", expectVal, actualVal);
ExitProgram();
}
}
public static void ExitProgram()
{
TDengine.Cleanup();
System.Environment.Exit(1);
}
public static List<Object> GetResData(IntPtr res)
{
List<Object> dataRaw = new List<Object>();
if (!IsValidResult(res))
{
ExitProgram();
}
List<TDengineMeta> metas = GetResField(res);
dataRaw = QueryRes(res, metas);
return dataRaw;
}
public static TDengineMeta ConstructTDengineMeta(string name, string type)
{
TDengineMeta _meta = new TDengineMeta();
_meta.name = name;
char[] separators = new char[] { '(', ')' };
string[] subs = type.Split(separators, StringSplitOptions.RemoveEmptyEntries);
switch (subs[0].ToUpper())
{
case "BOOL":
_meta.type = 1;
_meta.size = 1;
break;
case "TINYINT":
_meta.type = 2;
_meta.size = 1;
break;
case "SMALLINT":
_meta.type = 3;
_meta.size = 2;
break;
case "INT":
_meta.type = 4;
_meta.size = 4;
break;
case "BIGINT":
_meta.type = 5;
_meta.size = 8;
break;
case "TINYINT UNSIGNED":
_meta.type = 11;
_meta.size = 1;
break;
case "SMALLINT UNSIGNED":
_meta.type = 12;
_meta.size = 2;
break;
case "INT UNSIGNED":
_meta.type = 13;
_meta.size = 4;
break;
case "BIGINT UNSIGNED":
_meta.type = 14;
_meta.size = 8;
break;
case "FLOAT":
_meta.type = 6;
_meta.size = 4;
break;
case "DOUBLE":
_meta.type = 7;
_meta.size = 8;
break;
case "BINARY":
_meta.type = 8;
_meta.size = short.Parse(subs[1]);
break;
case "TIMESTAMP":
_meta.type = 9;
_meta.size = 8;
break;
case "NCHAR":
_meta.type = 10;
_meta.size = short.Parse(subs[1]);
break;
case "JSON":
_meta.type = 15;
_meta.size = 4096;
break;
default:
_meta.type = byte.MaxValue;
_meta.size = 0;
break;
}
return _meta;
}
private static List<Object> QueryRes(IntPtr res, List<TDengineMeta> metas)
{
IntPtr rowdata;
long queryRows = 0;
List<Object> dataRaw = new List<Object>();
int fieldCount = metas.Count;
while ((rowdata = TDengine.FetchRows(res)) != IntPtr.Zero)
{
queryRows++;
IntPtr colLengthPtr = TDengine.FetchLengths(res);
int[] colLengthArr = new int[fieldCount];
Marshal.Copy(colLengthPtr, colLengthArr, 0, fieldCount);
for (int fields = 0; fields < fieldCount; ++fields)
{
TDengineMeta meta = metas[fields];
int offset = IntPtr.Size * fields;
IntPtr data = Marshal.ReadIntPtr(rowdata, offset);
if (data == IntPtr.Zero)
{
dataRaw.Add("NULL");
continue;
}
switch ((TDengineDataType)meta.type)
{
case TDengineDataType.TSDB_DATA_TYPE_BOOL:
bool v1 = Marshal.ReadByte(data) == 0 ? false : true;
dataRaw.Add(v1);
break;
case TDengineDataType.TSDB_DATA_TYPE_TINYINT:
sbyte v2 = (sbyte)Marshal.ReadByte(data);
dataRaw.Add(v2);
break;
case TDengineDataType.TSDB_DATA_TYPE_SMALLINT:
short v3 = Marshal.ReadInt16(data);
dataRaw.Add(v3);
break;
case TDengineDataType.TSDB_DATA_TYPE_INT:
int v4 = Marshal.ReadInt32(data);
dataRaw.Add(v4);
break;
case TDengineDataType.TSDB_DATA_TYPE_BIGINT:
long v5 = Marshal.ReadInt64(data);
dataRaw.Add(v5);
break;
case TDengineDataType.TSDB_DATA_TYPE_FLOAT:
float v6 = (float)Marshal.PtrToStructure(data, typeof(float));
dataRaw.Add(v6);
break;
case TDengineDataType.TSDB_DATA_TYPE_DOUBLE:
double v7 = (double)Marshal.PtrToStructure(data, typeof(double));
dataRaw.Add(v7);
break;
case TDengineDataType.TSDB_DATA_TYPE_BINARY:
// string v8 = Marshal.PtrToStringAnsi(data, colLengthArr[fields]);
string v8 = Marshal.PtrToStringUTF8(data, colLengthArr[fields]);
dataRaw.Add(v8);
break;
case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP:
long v9 = Marshal.ReadInt64(data);
dataRaw.Add(v9);
break;
case TDengineDataType.TSDB_DATA_TYPE_NCHAR:
// string v10 = Marshal.PtrToStringAnsi(data, colLengthArr[fields]);
string v10 = Marshal.PtrToStringUTF8(data, colLengthArr[fields]);
dataRaw.Add(v10);
break;
case TDengineDataType.TSDB_DATA_TYPE_UTINYINT:
byte v12 = Marshal.ReadByte(data);
dataRaw.Add(v12);
break;
case TDengineDataType.TSDB_DATA_TYPE_USMALLINT:
ushort v13 = (ushort)Marshal.ReadInt16(data);
dataRaw.Add(v13);
break;
case TDengineDataType.TSDB_DATA_TYPE_UINT:
uint v14 = (uint)Marshal.ReadInt32(data);
dataRaw.Add(v14);
break;
case TDengineDataType.TSDB_DATA_TYPE_UBIGINT:
ulong v15 = (ulong)Marshal.ReadInt64(data);
dataRaw.Add(v15);
break;
default:
dataRaw.Add("unknown value");
break;
}
}
}
if (TDengine.ErrorNo(res) != 0)
{
Console.Write("Query is not complete, Error {0:G}", TDengine.ErrorNo(res), TDengine.Error(res));
}
TDengine.FreeResult(res);
Console.WriteLine("");
return dataRaw;
}
        // Generate an INSERT statement from the given column data and tag data
public static string ConstructInsertSql(string table,string stable,List<Object> colData,List<Object> tagData,int numOfRows)
{
            int numOfFields = colData.Count / numOfRows;
StringBuilder insertSql;
if (stable == "")
{
insertSql = new StringBuilder($"insert into {table} values(");
}
else
{
insertSql = new StringBuilder($"insert into {table} using {stable} tags(");
for (int j = 0; j < tagData.Count; j++)
{
if (tagData[j] is String)
{
insertSql.Append('\'');
insertSql.Append(tagData[j]);
insertSql.Append('\'');
}
else
{
insertSql.Append(tagData[j]);
}
if (j + 1 != tagData.Count)
{
insertSql.Append(',');
}
}
insertSql.Append(")values(");
}
for (int i = 0; i < colData.Count; i++)
{
if (colData[i] is String)
{
insertSql.Append('\'');
insertSql.Append(colData[i]);
insertSql.Append('\'');
}
else
{
insertSql.Append(colData[i]);
}
                if ((i + 1) % numOfFields == 0 && (i + 1) != colData.Count)
{
insertSql.Append(")(");
}
else if ((i + 1) == colData.Count)
{
insertSql.Append(')');
}
else
{
insertSql.Append(',');
}
}
insertSql.Append(';');
//Console.WriteLine(insertSql.ToString());
return insertSql.ToString();
}
public static List<object> CombineColAndTagData(List<object> colData,List<object> tagData, int numOfRows)
{
var list = new List<Object>();
for (int i = 0; i < colData.Count; i++)
{
list.Add(colData[i]);
if ((i + 1) % (colData.Count / numOfRows) == 0)
{
for (int j = 0; j < tagData.Count; j++)
{
list.Add(tagData[j]);
}
}
}
return list;
}
}
}
...@@ -11,7 +11,7 @@ namespace Cases ...@@ -11,7 +11,7 @@ namespace Cases
IntPtr conn = IntPtr.Zero; IntPtr conn = IntPtr.Zero;
Console.WriteLine("===================JsonTagTest===================="); Console.WriteLine("===================JsonTagTest====================");
conn = conn = UtilsTools.TDConnection("127.0.0.1", "root", "taosdata", "", 0); conn = conn = UtilsTools.TDConnection("127.0.0.1", "root", "taosdata", "", 0);
UtilsTools.ExecuteUpdate(conn, "create database if not exists csharp_sample keep 3650"); UtilsTools.ExecuteUpdate(conn, "create database if not exists csharp keep 3650");
UtilsTools.ExecuteUpdate(conn, "use csharp"); UtilsTools.ExecuteUpdate(conn, "use csharp");
JsonTagSample jsonTagSample = new JsonTagSample(); JsonTagSample jsonTagSample = new JsonTagSample();
jsonTagSample.Test(conn); jsonTagSample.Test(conn);
......
...@@ -217,10 +217,10 @@ namespace Utils ...@@ -217,10 +217,10 @@ namespace Utils
} }
} }
} }
public static void ExitProgram() public static void ExitProgram(int i = 1)
{ {
TDengine.Cleanup(); TDengine.Cleanup();
System.Environment.Exit(0); System.Environment.Exit(i);
} }
} }
} }
\ No newline at end of file
...@@ -5,8 +5,8 @@ ...@@ -5,8 +5,8 @@
<TargetFramework>net5.0</TargetFramework> <TargetFramework>net5.0</TargetFramework>
</PropertyGroup> </PropertyGroup>
<ItemGroup> <ItemGroup>
<PackageReference Include="TDengine.Connector" Version="1.0.3" /> <PackageReference Include="TDengine.Connector" Version="1.0.3" />
</ItemGroup> </ItemGroup>
</Project> </Project>
...@@ -5,8 +5,8 @@ ...@@ -5,8 +5,8 @@
<TargetFramework>net5.0</TargetFramework> <TargetFramework>net5.0</TargetFramework>
</PropertyGroup> </PropertyGroup>
<ItemGroup> <ItemGroup>
<PackageReference Include="TDengine.Connector" Version="1.0.2" /> <PackageReference Include="TDengine.Connector" Version="1.0.2" />
</ItemGroup> </ItemGroup>
</Project> </Project>
...@@ -289,7 +289,7 @@ namespace TDengineDriver ...@@ -289,7 +289,7 @@ namespace TDengineDriver
static void ExitProgram() static void ExitProgram()
{ {
System.Environment.Exit(0); System.Environment.Exit(1);
} }
public void cleanup() public void cleanup()
......
...@@ -543,7 +543,7 @@ namespace TDengineDriver ...@@ -543,7 +543,7 @@ namespace TDengineDriver
public static void ExitProgram() public static void ExitProgram()
{ {
TDengine.Cleanup(); TDengine.Cleanup();
System.Environment.Exit(0); System.Environment.Exit(1);
} }
} }
} }
...@@ -7,7 +7,6 @@ LFLAGS = '-Wl,-rpath,/usr/local/taos/driver/' -ltaos -lpthread -lm -lrt ...@@ -7,7 +7,6 @@ LFLAGS = '-Wl,-rpath,/usr/local/taos/driver/' -ltaos -lpthread -lm -lrt
CFLAGS = -O3 -g -Wall -Wno-deprecated -fPIC -Wno-unused-result -Wconversion \ CFLAGS = -O3 -g -Wall -Wno-deprecated -fPIC -Wno-unused-result -Wconversion \
-Wno-char-subscripts -D_REENTRANT -Wno-format -D_REENTRANT -DLINUX \ -Wno-char-subscripts -D_REENTRANT -Wno-format -D_REENTRANT -DLINUX \
-Wno-unused-function -D_M_X64 -I/usr/local/taos/include -std=gnu99 \ -Wno-unused-function -D_M_X64 -I/usr/local/taos/include -std=gnu99 \
-I../../../deps/cJson/inc \
-Wno-unused-function -D_M_X64 -I/usr/local/taos/include -std=gnu99 \ -Wno-unused-function -D_M_X64 -I/usr/local/taos/include -std=gnu99 \
-fsanitize=address -fsanitize=undefined -fno-sanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=null -fno-sanitize=alignment -fsanitize=address -fsanitize=undefined -fno-sanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=null -fno-sanitize=alignment
......
...@@ -33,12 +33,12 @@ typedef struct SCompareParam { ...@@ -33,12 +33,12 @@ typedef struct SCompareParam {
int32_t groupOrderType; int32_t groupOrderType;
} SCompareParam; } SCompareParam;
static bool needToMerge(SSDataBlock* pBlock, SArray* columnIndexList, int32_t index, char **buf) { static bool needToMerge(SSDataBlock* pBlock, SArray* columnIndexList, int32_t idx, char **buf) {
int32_t ret = 0; int32_t ret = 0;
size_t size = taosArrayGetSize(columnIndexList); size_t size = taosArrayGetSize(columnIndexList);
if (size > 0) { if (size > 0) {
ret = compare_aRv(pBlock, columnIndexList, (int32_t) size, index, buf, TSDB_ORDER_ASC); ret = compare_aRv(pBlock, columnIndexList, (int32_t) size, idx, buf, TSDB_ORDER_ASC);
} }
// if ret == 0, means the result belongs to the same group // if ret == 0, means the result belongs to the same group
...@@ -563,9 +563,9 @@ static void savePrevOrderColumns(char** prevRow, SArray* pColumnList, SSDataBloc ...@@ -563,9 +563,9 @@ static void savePrevOrderColumns(char** prevRow, SArray* pColumnList, SSDataBloc
int32_t size = (int32_t) taosArrayGetSize(pColumnList); int32_t size = (int32_t) taosArrayGetSize(pColumnList);
for(int32_t i = 0; i < size; ++i) { for(int32_t i = 0; i < size; ++i) {
SColIndex* index = taosArrayGet(pColumnList, i); SColIndex* idx = taosArrayGet(pColumnList, i);
SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, index->colIndex); SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, idx->colIndex);
assert(index->colId == pColInfo->info.colId); assert(idx->colId == pColInfo->info.colId);
memcpy(prevRow[i], pColInfo->pData + pColInfo->info.bytes * rowIndex, pColInfo->info.bytes); memcpy(prevRow[i], pColInfo->pData + pColInfo->info.bytes * rowIndex, pColInfo->info.bytes);
} }
...@@ -603,7 +603,7 @@ static void doMergeResultImpl(SOperatorInfo* pInfo, SQLFunctionCtx *pCtx, int32_ ...@@ -603,7 +603,7 @@ static void doMergeResultImpl(SOperatorInfo* pInfo, SQLFunctionCtx *pCtx, int32_
for (int32_t j = 0; j < numOfExpr; ++j) { for (int32_t j = 0; j < numOfExpr; ++j) {
int32_t functionId = pCtx[j].functionId; int32_t functionId = pCtx[j].functionId;
if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) { if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY || functionId == TSDB_FUNC_COL_DUMMY) {
continue; continue;
} }
...@@ -625,7 +625,7 @@ static void doMergeResultImpl(SOperatorInfo* pInfo, SQLFunctionCtx *pCtx, int32_ ...@@ -625,7 +625,7 @@ static void doMergeResultImpl(SOperatorInfo* pInfo, SQLFunctionCtx *pCtx, int32_
static void doFinalizeResultImpl(SMultiwayMergeInfo* pInfo, SQLFunctionCtx *pCtx, int32_t numOfExpr) { static void doFinalizeResultImpl(SMultiwayMergeInfo* pInfo, SQLFunctionCtx *pCtx, int32_t numOfExpr) {
for(int32_t j = 0; j < numOfExpr; ++j) { for(int32_t j = 0; j < numOfExpr; ++j) {
int32_t functionId = pCtx[j].functionId; int32_t functionId = pCtx[j].functionId;
if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) { if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY || functionId == TSDB_FUNC_COL_DUMMY) {
continue; continue;
} }
......
...@@ -152,7 +152,7 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) { ...@@ -152,7 +152,7 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols, int32_t typeColLength, static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols, int32_t typeColLength,
int32_t noteColLength) { int32_t noteColLength) {
int32_t rowLen = 0; int32_t rowLen = 0;
SColumnIndex index = {0}; SColumnIndex idx = {0, 0};
pSql->cmd.numOfCols = numOfCols; pSql->cmd.numOfCols = numOfCols;
...@@ -163,7 +163,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols, ...@@ -163,7 +163,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols,
tstrncpy(f.name, "Field", sizeof(f.name)); tstrncpy(f.name, "Field", sizeof(f.name));
SInternalField* pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); SInternalField* pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &idx, TSDB_DATA_TYPE_BINARY,
(TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE, -1000, (TSDB_COL_NAME_LEN - 1), false); (TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE, -1000, (TSDB_COL_NAME_LEN - 1), false);
rowLen += ((TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE); rowLen += ((TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE);
...@@ -173,7 +173,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols, ...@@ -173,7 +173,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols,
tstrncpy(f.name, "Type", sizeof(f.name)); tstrncpy(f.name, "Type", sizeof(f.name));
pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, (int16_t)(typeColLength + VARSTR_HEADER_SIZE), pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &idx, TSDB_DATA_TYPE_BINARY, (int16_t)(typeColLength + VARSTR_HEADER_SIZE),
-1000, typeColLength, false); -1000, typeColLength, false);
rowLen += typeColLength + VARSTR_HEADER_SIZE; rowLen += typeColLength + VARSTR_HEADER_SIZE;
...@@ -183,7 +183,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols, ...@@ -183,7 +183,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols,
tstrncpy(f.name, "Length", sizeof(f.name)); tstrncpy(f.name, "Length", sizeof(f.name));
pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_INT, sizeof(int32_t), pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &idx, TSDB_DATA_TYPE_INT, sizeof(int32_t),
-1000, sizeof(int32_t), false); -1000, sizeof(int32_t), false);
rowLen += sizeof(int32_t); rowLen += sizeof(int32_t);
...@@ -193,7 +193,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols, ...@@ -193,7 +193,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols,
tstrncpy(f.name, "Note", sizeof(f.name)); tstrncpy(f.name, "Note", sizeof(f.name));
pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, (int16_t)(noteColLength + VARSTR_HEADER_SIZE), pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &idx, TSDB_DATA_TYPE_BINARY, (int16_t)(noteColLength + VARSTR_HEADER_SIZE),
-1000, noteColLength, false); -1000, noteColLength, false);
rowLen += noteColLength + VARSTR_HEADER_SIZE; rowLen += noteColLength + VARSTR_HEADER_SIZE;
...@@ -415,7 +415,7 @@ static int32_t tscGetTableTagValue(SCreateBuilder *builder, char *result) { ...@@ -415,7 +415,7 @@ static int32_t tscGetTableTagValue(SCreateBuilder *builder, char *result) {
static int32_t tscSCreateBuildResultFields(SSqlObj *pSql, BuildType type, const char *ddl) { static int32_t tscSCreateBuildResultFields(SSqlObj *pSql, BuildType type, const char *ddl) {
int32_t rowLen = 0; int32_t rowLen = 0;
int16_t ddlLen = (int16_t)strlen(ddl); int16_t ddlLen = (int16_t)strlen(ddl);
SColumnIndex index = {0}; SColumnIndex idx = {0};
pSql->cmd.numOfCols = 2; pSql->cmd.numOfCols = 2;
SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd); SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
...@@ -433,7 +433,7 @@ static int32_t tscSCreateBuildResultFields(SSqlObj *pSql, BuildType type, const ...@@ -433,7 +433,7 @@ static int32_t tscSCreateBuildResultFields(SSqlObj *pSql, BuildType type, const
} }
SInternalField* pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); SInternalField* pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, f.bytes, -1000, f.bytes - VARSTR_HEADER_SIZE, false); pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &idx, TSDB_DATA_TYPE_BINARY, f.bytes, -1000, f.bytes - VARSTR_HEADER_SIZE, false);
rowLen += f.bytes; rowLen += f.bytes;
...@@ -446,7 +446,7 @@ static int32_t tscSCreateBuildResultFields(SSqlObj *pSql, BuildType type, const ...@@ -446,7 +446,7 @@ static int32_t tscSCreateBuildResultFields(SSqlObj *pSql, BuildType type, const
} }
pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &idx, TSDB_DATA_TYPE_BINARY,
(int16_t)(ddlLen + VARSTR_HEADER_SIZE), -1000, ddlLen, false); (int16_t)(ddlLen + VARSTR_HEADER_SIZE), -1000, ddlLen, false);
rowLen += ddlLen + VARSTR_HEADER_SIZE; rowLen += ddlLen + VARSTR_HEADER_SIZE;
......
...@@ -61,8 +61,8 @@ int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, SParsedDataColIn ...@@ -61,8 +61,8 @@ int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, SParsedDataColIn
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int16_t timePrec) { int tsParseTime(SStrToken *pToken, int64_t *pTime, char **next, char *err, int16_t timePrec) {
int32_t index = 0; int32_t idx = 0;
SStrToken sToken; SStrToken sToken;
int64_t interval; int64_t interval;
int64_t useconds = 0; int64_t useconds = 0;
...@@ -80,8 +80,8 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1 ...@@ -80,8 +80,8 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1
useconds = taosStr2int64(pToken->z); useconds = taosStr2int64(pToken->z);
} else { } else {
// strptime("2001-11-12 18:31:01", "%Y-%m-%d %H:%M:%S", &tm); // strptime("2001-11-12 18:31:01", "%Y-%m-%d %H:%M:%S", &tm);
if (taosParseTime(pToken->z, time, pToken->n, timePrec, tsDaylight) != TSDB_CODE_SUCCESS) { if (taosParseTime(pToken->z, pTime, pToken->n, timePrec, tsDaylight) != TSDB_CODE_SUCCESS) {
return tscInvalidOperationMsg(error, "invalid timestamp format", pToken->z); return tscInvalidOperationMsg(err, "invalid timestamp format", pToken->z);
} }
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
...@@ -91,7 +91,7 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1 ...@@ -91,7 +91,7 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1
if (isspace(pToken->z[k])) continue; if (isspace(pToken->z[k])) continue;
if (pToken->z[k] == ',') { if (pToken->z[k] == ',') {
*next = pTokenEnd; *next = pTokenEnd;
*time = useconds; *pTime = useconds;
return 0; return 0;
} }
...@@ -103,17 +103,17 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1 ...@@ -103,17 +103,17 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1
* e.g., now+12a, now-5h * e.g., now+12a, now-5h
*/ */
SStrToken valueToken; SStrToken valueToken;
index = 0; idx = 0;
sToken = tStrGetToken(pTokenEnd, &index, false); sToken = tStrGetToken(pTokenEnd, &idx, false);
pTokenEnd += index; pTokenEnd += idx;
if (sToken.type == TK_MINUS || sToken.type == TK_PLUS) { if (sToken.type == TK_MINUS || sToken.type == TK_PLUS) {
index = 0; idx = 0;
valueToken = tStrGetToken(pTokenEnd, &index, false); valueToken = tStrGetToken(pTokenEnd, &idx, false);
pTokenEnd += index; pTokenEnd += idx;
if (valueToken.n < 2) { if (valueToken.n < 2) {
return tscInvalidOperationMsg(error, "value expected in timestamp", sToken.z); return tscInvalidOperationMsg(err, "value expected in timestamp", sToken.z);
} }
char unit = 0; char unit = 0;
...@@ -130,7 +130,7 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1 ...@@ -130,7 +130,7 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1
*next = pTokenEnd; *next = pTokenEnd;
} }
*time = useconds; *pTime = useconds;
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
...@@ -433,7 +433,7 @@ int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start) { ...@@ -433,7 +433,7 @@ int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start) {
int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, int32_t *len, char *tmpTokenBuf, int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, int32_t *len, char *tmpTokenBuf,
SInsertStatementParam *pInsertParam) { SInsertStatementParam *pInsertParam) {
int32_t index = 0; int32_t idx = 0;
SStrToken sToken = {0}; SStrToken sToken = {0};
char *row = pDataBlocks->pData + pDataBlocks->size; // skip the SSubmitBlk header char *row = pDataBlocks->pData + pDataBlocks->size; // skip the SSubmitBlk header
...@@ -455,9 +455,9 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i ...@@ -455,9 +455,9 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i
SSchema *pSchema = &schema[colIndex]; // get colId here SSchema *pSchema = &schema[colIndex]; // get colId here
index = 0; idx = 0;
sToken = tStrGetToken(*str, &index, true); sToken = tStrGetToken(*str, &idx, true);
*str += index; *str += idx;
if (sToken.type == TK_QUESTION) { if (sToken.type == TK_QUESTION) {
if (!isParseBindParam) { if (!isParseBindParam) {
...@@ -564,7 +564,7 @@ int32_t boundIdxCompar(const void *lhs, const void *rhs) { ...@@ -564,7 +564,7 @@ int32_t boundIdxCompar(const void *lhs, const void *rhs) {
int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SInsertStatementParam *pInsertParam, int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SInsertStatementParam *pInsertParam,
int32_t* numOfRows, char *tmpTokenBuf) { int32_t* numOfRows, char *tmpTokenBuf) {
int32_t index = 0; int32_t idx = 0;
int32_t code = 0; int32_t code = 0;
(*numOfRows) = 0; (*numOfRows) = 0;
...@@ -584,11 +584,11 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SIn ...@@ -584,11 +584,11 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SIn
pDataBlock->rowBuilder.rowSize = extendedRowSize; pDataBlock->rowBuilder.rowSize = extendedRowSize;
while (1) { while (1) {
index = 0; idx = 0;
sToken = tStrGetToken(*str, &index, false); sToken = tStrGetToken(*str, &idx, false);
if (sToken.n == 0 || sToken.type != TK_LP) break; if (sToken.n == 0 || sToken.type != TK_LP) break;
*str += index; *str += idx;
if ((*numOfRows) >= maxRows || pDataBlock->size + extendedRowSize >= pDataBlock->nAllocSize) { if ((*numOfRows) >= maxRows || pDataBlock->size + extendedRowSize >= pDataBlock->nAllocSize) {
int32_t tSize; int32_t tSize;
code = tscAllocateMemIfNeed(pDataBlock, extendedRowSize, &tSize); code = tscAllocateMemIfNeed(pDataBlock, extendedRowSize, &tSize);
...@@ -609,13 +609,13 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SIn ...@@ -609,13 +609,13 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SIn
pDataBlock->size += len; pDataBlock->size += len;
index = 0; idx = 0;
sToken = tStrGetToken(*str, &index, false); sToken = tStrGetToken(*str, &idx, false);
if (sToken.n == 0 || sToken.type != TK_RP) { if (sToken.n == 0 || sToken.type != TK_RP) {
return tscSQLSyntaxErrMsg(pInsertParam->msg, ") expected", *str); return tscSQLSyntaxErrMsg(pInsertParam->msg, ") expected", *str);
} }
*str += index; *str += idx;
(*numOfRows)++; (*numOfRows)++;
} }
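/* Editor's sketch (hypothetical table and data, not from the patch): the
 * multi-row VALUES form the loop above iterates over -- one '(' ... ')' group
 * per row, each group parsed by tsParseOneRow(). */
static const char *multi_row_values_example =
    "INSERT INTO d1001 VALUES (now, 10.3) (now+1s, 10.5) (now+2s, 10.1);";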
...@@ -876,7 +876,7 @@ int validateTableName(char *tblName, int len, SStrToken* psTblToken, bool *dbInc ...@@ -876,7 +876,7 @@ int validateTableName(char *tblName, int len, SStrToken* psTblToken, bool *dbInc
static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundColumn) { static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundColumn) {
int32_t index = 0; int32_t idx = 0;
SStrToken sToken = {0}; SStrToken sToken = {0};
SStrToken tableToken = {0}; SStrToken tableToken = {0};
int32_t code = TSDB_CODE_SUCCESS; int32_t code = TSDB_CODE_SUCCESS;
...@@ -891,14 +891,14 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC ...@@ -891,14 +891,14 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
char *sql = *sqlstr; char *sql = *sqlstr;
// get the token of specified table // get the token of specified table
index = 0; idx = 0;
tableToken = tStrGetToken(sql, &index, false); tableToken = tStrGetToken(sql, &idx, false);
sql += index; sql += idx;
// skip possibly exists column list // skip possibly exists column list
index = 0; idx = 0;
sToken = tStrGetToken(sql, &index, false); sToken = tStrGetToken(sql, &idx, false);
sql += index; sql += idx;
int32_t numOfColList = 0; int32_t numOfColList = 0;
...@@ -907,8 +907,8 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC ...@@ -907,8 +907,8 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
*boundColumn = &sToken.z[0]; *boundColumn = &sToken.z[0];
while (1) { while (1) {
index = 0; idx = 0;
sToken = tStrGetToken(sql, &index, false); sToken = tStrGetToken(sql, &idx, false);
if (sToken.type == TK_ILLEGAL) { if (sToken.type == TK_ILLEGAL) {
return tscSQLSyntaxErrMsg(pCmd->payload, "unrecognized token", sToken.z); return tscSQLSyntaxErrMsg(pCmd->payload, "unrecognized token", sToken.z);
...@@ -918,12 +918,12 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC ...@@ -918,12 +918,12 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
break; break;
} }
sql += index; sql += idx;
++numOfColList; ++numOfColList;
} }
sToken = tStrGetToken(sql, &index, false); sToken = tStrGetToken(sql, &idx, false);
sql += index; sql += idx;
} }
if (numOfColList == 0 && (*boundColumn) != NULL) { if (numOfColList == 0 && (*boundColumn) != NULL) {
...@@ -933,9 +933,9 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC ...@@ -933,9 +933,9 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, TABLE_INDEX); STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, TABLE_INDEX);
if (sToken.type == TK_USING) { // create table if not exists according to the super table if (sToken.type == TK_USING) { // create table if not exists according to the super table
index = 0; idx = 0;
sToken = tStrGetToken(sql, &index, false); sToken = tStrGetToken(sql, &idx, false);
sql += index; sql += idx;
if (sToken.type == TK_ILLEGAL) { if (sToken.type == TK_ILLEGAL) {
return tscSQLSyntaxErrMsg(pCmd->payload, NULL, sql); return tscSQLSyntaxErrMsg(pCmd->payload, NULL, sql);
...@@ -980,8 +980,8 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC ...@@ -980,8 +980,8 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
SParsedDataColInfo spd = {0}; SParsedDataColInfo spd = {0};
tscSetBoundColumnInfo(&spd, pTagSchema, tscGetNumOfTags(pSTableMetaInfo->pTableMeta)); tscSetBoundColumnInfo(&spd, pTagSchema, tscGetNumOfTags(pSTableMetaInfo->pTableMeta));
index = 0; idx = 0;
sToken = tStrGetToken(sql, &index, false); sToken = tStrGetToken(sql, &idx, false);
if (sToken.type != TK_TAGS && sToken.type != TK_LP) { if (sToken.type != TK_TAGS && sToken.type != TK_LP) {
tscDestroyBoundColumnInfo(&spd); tscDestroyBoundColumnInfo(&spd);
return tscSQLSyntaxErrMsg(pInsertParam->msg, "keyword TAGS expected", sToken.z); return tscSQLSyntaxErrMsg(pInsertParam->msg, "keyword TAGS expected", sToken.z);
...@@ -1002,16 +1002,16 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC ...@@ -1002,16 +1002,16 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
sql = end; sql = end;
index = 0; // keywords of "TAGS" idx = 0; // keywords of "TAGS"
sToken = tStrGetToken(sql, &index, false); sToken = tStrGetToken(sql, &idx, false);
sql += index; sql += idx;
} else { } else {
sql += index; sql += idx;
} }
index = 0; idx = 0;
sToken = tStrGetToken(sql, &index, false); sToken = tStrGetToken(sql, &idx, false);
sql += index; sql += idx;
if (sToken.type != TK_LP) { if (sToken.type != TK_LP) {
tscDestroyBoundColumnInfo(&spd); tscDestroyBoundColumnInfo(&spd);
...@@ -1027,9 +1027,9 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC ...@@ -1027,9 +1027,9 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
for (int i = 0; i < spd.numOfBound; ++i) { for (int i = 0; i < spd.numOfBound; ++i) {
SSchema* pSchema = &pTagSchema[spd.boundedColumns[i]]; SSchema* pSchema = &pTagSchema[spd.boundedColumns[i]];
index = 0; idx = 0;
sToken = tStrGetToken(sql, &index, true); sToken = tStrGetToken(sql, &idx, true);
sql += index; sql += idx;
if (TK_ILLEGAL == sToken.type) { if (TK_ILLEGAL == sToken.type) {
tdDestroyKVRowBuilder(&kvRowBuilder); tdDestroyKVRowBuilder(&kvRowBuilder);
...@@ -1101,9 +1101,9 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC ...@@ -1101,9 +1101,9 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
free(row); free(row);
pInsertParam->tagData.data = pTag; pInsertParam->tagData.data = pTag;
index = 0; idx = 0;
sToken = tStrGetToken(sql, &index, false); sToken = tStrGetToken(sql, &idx, false);
sql += index; sql += idx;
if (sToken.n == 0 || sToken.type != TK_RP) { if (sToken.n == 0 || sToken.type != TK_RP) {
return tscSQLSyntaxErrMsg(pInsertParam->msg, ") expected", sToken.z); return tscSQLSyntaxErrMsg(pInsertParam->msg, ") expected", sToken.z);
} }
...@@ -1112,9 +1112,9 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC ...@@ -1112,9 +1112,9 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
* insert into table_name using super_table(tag_name1, tag_name2) tags(tag_val1, tag_val2) * insert into table_name using super_table(tag_name1, tag_name2) tags(tag_val1, tag_val2)
* (normal_col1, normal_col2) values(normal_col1_val, normal_col2_val); * (normal_col1, normal_col2) values(normal_col1_val, normal_col2_val);
* */ * */
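/* Editor's sketch (hypothetical table, tag, and column names -- not from the
 * patch): a concrete statement of the shape described in the comment above,
 * i.e. auto-create a child table from a super table, bind a subset of tags,
 * then bind a subset of columns before VALUES. */
static const char *auto_create_insert_example =
    "INSERT INTO d1001 USING meters (location, groupid) "
    "TAGS ('SanFrancisco', 2) "
    "(ts, current) VALUES (now, 10.3);";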
index = 0; idx = 0;
sToken = tStrGetToken(sql, &index, false); sToken = tStrGetToken(sql, &idx, false);
sql += index; sql += idx;
int numOfColsAfterTags = 0; int numOfColsAfterTags = 0;
if (sToken.type == TK_LP) { if (sToken.type == TK_LP) {
if (*boundColumn != NULL) { if (*boundColumn != NULL) {
...@@ -1124,18 +1124,18 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC ...@@ -1124,18 +1124,18 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
} }
while (1) { while (1) {
index = 0; idx = 0;
sToken = tStrGetToken(sql, &index, false); sToken = tStrGetToken(sql, &idx, false);
if (sToken.type == TK_RP) { if (sToken.type == TK_RP) {
break; break;
} }
if (sToken.n == 0 || sToken.type == TK_SEMI || index == 0) { if (sToken.n == 0 || sToken.type == TK_SEMI || idx == 0) {
return tscSQLSyntaxErrMsg(pCmd->payload, "unexpected token", sql); return tscSQLSyntaxErrMsg(pCmd->payload, "unexpected token", sql);
} }
sql += index; sql += idx;
++numOfColsAfterTags; ++numOfColsAfterTags;
} }
...@@ -1143,7 +1143,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC ...@@ -1143,7 +1143,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
return TSDB_CODE_TSC_SQL_SYNTAX_ERROR; return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
} }
sToken = tStrGetToken(sql, &index, false); sToken = tStrGetToken(sql, &idx, false);
} }
sql = sToken.z; sql = sToken.z;
...@@ -1213,9 +1213,9 @@ static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDat ...@@ -1213,9 +1213,9 @@ static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDat
int32_t code = TSDB_CODE_SUCCESS; int32_t code = TSDB_CODE_SUCCESS;
int32_t index = 0; int32_t idx = 0;
SStrToken sToken = tStrGetToken(str, &index, false); SStrToken sToken = tStrGetToken(str, &idx, false);
str += index; str += idx;
if (sToken.type != TK_LP) { if (sToken.type != TK_LP) {
code = tscSQLSyntaxErrMsg(pInsertParam->msg, "( is expected", sToken.z); code = tscSQLSyntaxErrMsg(pInsertParam->msg, "( is expected", sToken.z);
...@@ -1225,9 +1225,9 @@ static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDat ...@@ -1225,9 +1225,9 @@ static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDat
bool isOrdered = true; bool isOrdered = true;
int32_t lastColIdx = -1; // last column found int32_t lastColIdx = -1; // last column found
while (1) { while (1) {
index = 0; idx = 0;
sToken = tStrGetToken(str, &index, false); sToken = tStrGetToken(str, &idx, false);
str += index; str += idx;
char tmpTokenBuf[TSDB_MAX_BYTES_PER_ROW] = {0}; // used for deleting the escape character backtick(`) char tmpTokenBuf[TSDB_MAX_BYTES_PER_ROW] = {0}; // used for deleting the escape character backtick(`)
strncpy(tmpTokenBuf, sToken.z, sToken.n); strncpy(tmpTokenBuf, sToken.z, sToken.n);
...@@ -1404,8 +1404,8 @@ int tsParseInsertSql(SSqlObj *pSql) { ...@@ -1404,8 +1404,8 @@ int tsParseInsertSql(SSqlObj *pSql) {
tscDebug("0x%"PRIx64" create data block list hashList:%p", pSql->self, pInsertParam->pTableBlockHashList); tscDebug("0x%"PRIx64" create data block list hashList:%p", pSql->self, pInsertParam->pTableBlockHashList);
while (1) { while (1) {
int32_t index = 0; int32_t idx = 0;
SStrToken sToken = tStrGetToken(str, &index, false); SStrToken sToken = tStrGetToken(str, &idx, false);
// no data in the sql string anymore. // no data in the sql string anymore.
if (sToken.n == 0) { if (sToken.n == 0) {
...@@ -1469,9 +1469,9 @@ int tsParseInsertSql(SSqlObj *pSql) { ...@@ -1469,9 +1469,9 @@ int tsParseInsertSql(SSqlObj *pSql) {
goto _clean; goto _clean;
} }
index = 0; idx = 0;
sToken = tStrGetToken(str, &index, false); sToken = tStrGetToken(str, &idx, false);
str += index; str += idx;
if (sToken.n == 0 || (sToken.type != TK_FILE && sToken.type != TK_VALUES)) { if (sToken.n == 0 || (sToken.type != TK_FILE && sToken.type != TK_VALUES)) {
code = tscSQLSyntaxErrMsg(pInsertParam->msg, "keyword VALUES or FILE required", sToken.z); code = tscSQLSyntaxErrMsg(pInsertParam->msg, "keyword VALUES or FILE required", sToken.z);
...@@ -1484,13 +1484,13 @@ int tsParseInsertSql(SSqlObj *pSql) { ...@@ -1484,13 +1484,13 @@ int tsParseInsertSql(SSqlObj *pSql) {
goto _clean; goto _clean;
} }
index = 0; idx = 0;
sToken = tStrGetToken(str, &index, false); sToken = tStrGetToken(str, &idx, false);
if (sToken.type != TK_STRING && sToken.type != TK_ID) { if (sToken.type != TK_STRING && sToken.type != TK_ID) {
code = tscSQLSyntaxErrMsg(pInsertParam->msg, "file path is required following keyword FILE", sToken.z); code = tscSQLSyntaxErrMsg(pInsertParam->msg, "file path is required following keyword FILE", sToken.z);
goto _clean; goto _clean;
} }
str += index; str += idx;
if (sToken.n == 0) { if (sToken.n == 0) {
code = tscSQLSyntaxErrMsg(pInsertParam->msg, "file path is required following keyword FILE", sToken.z); code = tscSQLSyntaxErrMsg(pInsertParam->msg, "file path is required following keyword FILE", sToken.z);
goto _clean; goto _clean;
...@@ -1590,7 +1590,7 @@ int tsInsertInitialCheck(SSqlObj *pSql) { ...@@ -1590,7 +1590,7 @@ int tsInsertInitialCheck(SSqlObj *pSql) {
return TSDB_CODE_TSC_NO_WRITE_AUTH; return TSDB_CODE_TSC_NO_WRITE_AUTH;
} }
int32_t index = 0; int32_t idx = 0;
SSqlCmd *pCmd = &pSql->cmd; SSqlCmd *pCmd = &pSql->cmd;
pCmd->count = 0; pCmd->count = 0;
...@@ -1600,12 +1600,12 @@ int tsInsertInitialCheck(SSqlObj *pSql) { ...@@ -1600,12 +1600,12 @@ int tsInsertInitialCheck(SSqlObj *pSql) {
SQueryInfo *pQueryInfo = tscGetQueryInfoS(pCmd); SQueryInfo *pQueryInfo = tscGetQueryInfoS(pCmd);
TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT); TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT);
SStrToken sToken = tStrGetToken(pSql->sqlstr, &index, false); SStrToken sToken = tStrGetToken(pSql->sqlstr, &idx, false);
if (sToken.type != TK_INSERT && sToken.type != TK_IMPORT) { if (sToken.type != TK_INSERT && sToken.type != TK_IMPORT) {
return tscSQLSyntaxErrMsg(pInsertParam->msg, NULL, sToken.z); return tscSQLSyntaxErrMsg(pInsertParam->msg, NULL, sToken.z);
} }
sToken = tStrGetToken(pSql->sqlstr, &index, false); sToken = tStrGetToken(pSql->sqlstr, &idx, false);
if (sToken.type != TK_INTO) { if (sToken.type != TK_INTO) {
return tscSQLSyntaxErrMsg(pInsertParam->msg, "keyword INTO is expected", sToken.z); return tscSQLSyntaxErrMsg(pInsertParam->msg, "keyword INTO is expected", sToken.z);
} }
......
...@@ -1966,14 +1966,14 @@ int32_t convertSmlTimeStamp(TAOS_SML_KV *pVal, char *value, ...@@ -1966,14 +1966,14 @@ int32_t convertSmlTimeStamp(TAOS_SML_KV *pVal, char *value,
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
static int32_t parseSmlTimeStamp(TAOS_SML_KV **pTS, const char **index, SSmlLinesInfo* info) { static int32_t parseSmlTimeStamp(TAOS_SML_KV **pTS, const char **idx, SSmlLinesInfo* info) {
const char *start, *cur; const char *start, *cur;
int32_t ret = TSDB_CODE_SUCCESS; int32_t ret = TSDB_CODE_SUCCESS;
int len = 0; int len = 0;
char key[] = "ts"; char key[] = "ts";
char *value = NULL; char *value = NULL;
start = cur = *index; start = cur = *idx;
*pTS = calloc(1, sizeof(TAOS_SML_KV)); *pTS = calloc(1, sizeof(TAOS_SML_KV));
while(*cur != '\0') { while(*cur != '\0') {
...@@ -2013,8 +2013,8 @@ bool checkDuplicateKey(char *key, SHashObj *pHash, SSmlLinesInfo* info) { ...@@ -2013,8 +2013,8 @@ bool checkDuplicateKey(char *key, SHashObj *pHash, SSmlLinesInfo* info) {
return false; return false;
} }
static int32_t parseSmlKey(TAOS_SML_KV *pKV, const char **index, SHashObj *pHash, SSmlLinesInfo* info) { static int32_t parseSmlKey(TAOS_SML_KV *pKV, const char **idx, SHashObj *pHash, SSmlLinesInfo* info) {
const char *cur = *index; const char *cur = *idx;
char key[TSDB_COL_NAME_LEN + 1]; // +1 so writing key[len] stays in bounds char key[TSDB_COL_NAME_LEN + 1]; // +1 so writing key[len] stays in bounds
int16_t len = 0; int16_t len = 0;
...@@ -2048,12 +2048,12 @@ static int32_t parseSmlKey(TAOS_SML_KV *pKV, const char **index, SHashObj *pHash ...@@ -2048,12 +2048,12 @@ static int32_t parseSmlKey(TAOS_SML_KV *pKV, const char **index, SHashObj *pHash
memcpy(pKV->key, key, len + 1); memcpy(pKV->key, key, len + 1);
addEscapeCharToString(pKV->key, len); addEscapeCharToString(pKV->key, len);
tscDebug("SML:0x%"PRIx64" Key:%s|len:%d", info->id, pKV->key, len); tscDebug("SML:0x%"PRIx64" Key:%s|len:%d", info->id, pKV->key, len);
*index = cur + 1; *idx = cur + 1;
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index, static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **idx,
bool *is_last_kv, SSmlLinesInfo* info, bool isTag) { bool *is_last_kv, SSmlLinesInfo* info, bool isTag) {
const char *start, *cur; const char *start, *cur;
int32_t ret = TSDB_CODE_SUCCESS; int32_t ret = TSDB_CODE_SUCCESS;
...@@ -2077,7 +2077,7 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index, ...@@ -2077,7 +2077,7 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index,
val_rqoute val_rqoute
} val_state; } val_state;
start = cur = *index; start = cur = *idx;
tag_state = tag_common; tag_state = tag_common;
val_state = val_common; val_state = val_common;
...@@ -2100,17 +2100,17 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index, ...@@ -2100,17 +2100,17 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index,
} }
if (*cur == '"') { if (*cur == '"') {
if (cur == *index) { if (cur == *idx) {
tag_state = tag_lqoute; tag_state = tag_lqoute;
} }
cur += 1; cur += 1;
len += 1; len += 1;
break; break;
} else if (*cur == 'L') { } else if (*cur == 'L') {
line_len = strlen(*index); line_len = strlen(*idx);
/* common character at the end */ /* common character at the end */
if (cur + 1 >= *index + line_len) { if (cur + 1 >= *idx + line_len) {
*is_last_kv = true; *is_last_kv = true;
kv_done = true; kv_done = true;
break; break;
...@@ -2118,7 +2118,7 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index, ...@@ -2118,7 +2118,7 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index,
if (*(cur + 1) == '"') { if (*(cur + 1) == '"') {
/* string starts here */ /* string starts here */
if (cur + 1 == *index + 1) { if (cur + 1 == *idx + 1) {
tag_state = tag_lqoute; tag_state = tag_lqoute;
} }
cur += 2; cur += 2;
...@@ -2224,7 +2224,7 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index, ...@@ -2224,7 +2224,7 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index,
} }
if (*cur == '"') { if (*cur == '"') {
if (cur == *index) { if (cur == *idx) {
val_state = val_lqoute; val_state = val_lqoute;
} else { } else {
if (*(cur - 1) != '\\') { if (*(cur - 1) != '\\') {
...@@ -2238,10 +2238,10 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index, ...@@ -2238,10 +2238,10 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index,
len += 1; len += 1;
break; break;
} else if (*cur == 'L') { } else if (*cur == 'L') {
line_len = strlen(*index); line_len = strlen(*idx);
/* common character at the end */ /* common character at the end */
if (cur + 1 >= *index + line_len) { if (cur + 1 >= *idx + line_len) {
*is_last_kv = true; *is_last_kv = true;
kv_done = true; kv_done = true;
break; break;
...@@ -2249,13 +2249,13 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index, ...@@ -2249,13 +2249,13 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index,
if (*(cur + 1) == '"') { if (*(cur + 1) == '"') {
/* string starts here */ /* string starts here */
if (cur + 1 == *index + 1) { if (cur + 1 == *idx + 1) {
val_state = val_lqoute; val_state = val_lqoute;
cur += 2; cur += 2;
len += 2; len += 2;
} else { } else {
/* MUST be at the end of the string */ /* MUST be at the end of the string */
if (cur + 2 >= *index + line_len) { if (cur + 2 >= *idx + line_len) {
cur += 2; cur += 2;
len += 2; len += 2;
*is_last_kv = true; *is_last_kv = true;
...@@ -2385,7 +2385,7 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index, ...@@ -2385,7 +2385,7 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index,
} }
free(value); free(value);
*index = (*cur == '\0') ? cur : cur + 1; *idx = (*cur == '\0') ? cur : cur + 1;
return ret; return ret;
error: error:
...@@ -2395,9 +2395,9 @@ error: ...@@ -2395,9 +2395,9 @@ error:
return ret; return ret;
} }
static int32_t parseSmlMeasurement(TAOS_SML_DATA_POINT *pSml, const char **index, static int32_t parseSmlMeasurement(TAOS_SML_DATA_POINT *pSml, const char **idx,
uint8_t *has_tags, SSmlLinesInfo* info) { uint8_t *has_tags, SSmlLinesInfo* info) {
const char *cur = *index; const char *cur = *idx;
int16_t len = 0; int16_t len = 0;
pSml->stableName = calloc(TSDB_TABLE_NAME_LEN + TS_BACKQUOTE_CHAR_SIZE, 1); pSml->stableName = calloc(TSDB_TABLE_NAME_LEN + TS_BACKQUOTE_CHAR_SIZE, 1);
...@@ -2441,7 +2441,7 @@ static int32_t parseSmlMeasurement(TAOS_SML_DATA_POINT *pSml, const char **index ...@@ -2441,7 +2441,7 @@ static int32_t parseSmlMeasurement(TAOS_SML_DATA_POINT *pSml, const char **index
return TSDB_CODE_TSC_LINE_SYNTAX_ERROR; return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
} }
addEscapeCharToString(pSml->stableName, len); addEscapeCharToString(pSml->stableName, len);
*index = cur + 1; *idx = cur + 1;
tscDebug("SML:0x%"PRIx64" Stable name in measurement:%s|len:%d", info->id, pSml->stableName, len); tscDebug("SML:0x%"PRIx64" Stable name in measurement:%s|len:%d", info->id, pSml->stableName, len);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
...@@ -2464,10 +2464,10 @@ int32_t isValidChildTableName(const char *pTbName, int16_t len, SSmlLinesInfo* i ...@@ -2464,10 +2464,10 @@ int32_t isValidChildTableName(const char *pTbName, int16_t len, SSmlLinesInfo* i
static int32_t parseSmlKvPairs(TAOS_SML_KV **pKVs, int *num_kvs, static int32_t parseSmlKvPairs(TAOS_SML_KV **pKVs, int *num_kvs,
const char **index, bool isField, const char **idx, bool isField,
TAOS_SML_DATA_POINT* smlData, SHashObj *pHash, TAOS_SML_DATA_POINT* smlData, SHashObj *pHash,
SSmlLinesInfo* info) { SSmlLinesInfo* info) {
const char *cur = *index; const char *cur = *idx;
int32_t ret = TSDB_CODE_SUCCESS; int32_t ret = TSDB_CODE_SUCCESS;
TAOS_SML_KV *pkv; TAOS_SML_KV *pkv;
bool is_last_kv = false; bool is_last_kv = false;
...@@ -2555,7 +2555,7 @@ static int32_t parseSmlKvPairs(TAOS_SML_KV **pKVs, int *num_kvs, ...@@ -2555,7 +2555,7 @@ static int32_t parseSmlKvPairs(TAOS_SML_KV **pKVs, int *num_kvs,
error: error:
return ret; return ret;
done: done:
*index = cur; *idx = cur;
return ret; return ret;
} }
...@@ -2575,13 +2575,13 @@ static void moveTimeStampToFirstKv(TAOS_SML_DATA_POINT** smlData, TAOS_SML_KV *t ...@@ -2575,13 +2575,13 @@ static void moveTimeStampToFirstKv(TAOS_SML_DATA_POINT** smlData, TAOS_SML_KV *t
} }
int32_t tscParseLine(const char* sql, TAOS_SML_DATA_POINT* smlData, SSmlLinesInfo* info) { int32_t tscParseLine(const char* sql, TAOS_SML_DATA_POINT* smlData, SSmlLinesInfo* info) {
const char* index = sql; const char* idx = sql;
int32_t ret = TSDB_CODE_SUCCESS; int32_t ret = TSDB_CODE_SUCCESS;
uint8_t has_tags = 0; uint8_t has_tags = 0;
TAOS_SML_KV *timestamp = NULL; TAOS_SML_KV *timestamp = NULL;
SHashObj *keyHashTable = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false); SHashObj *keyHashTable = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false);
ret = parseSmlMeasurement(smlData, &index, &has_tags, info); ret = parseSmlMeasurement(smlData, &idx, &has_tags, info);
if (ret) { if (ret) {
tscError("SML:0x%"PRIx64" Unable to parse measurement", info->id); tscError("SML:0x%"PRIx64" Unable to parse measurement", info->id);
taosHashCleanup(keyHashTable); taosHashCleanup(keyHashTable);
...@@ -2591,7 +2591,7 @@ int32_t tscParseLine(const char* sql, TAOS_SML_DATA_POINT* smlData, SSmlLinesInf ...@@ -2591,7 +2591,7 @@ int32_t tscParseLine(const char* sql, TAOS_SML_DATA_POINT* smlData, SSmlLinesInf
//Parse Tags //Parse Tags
if (has_tags) { if (has_tags) {
ret = parseSmlKvPairs(&smlData->tags, &smlData->tagNum, &index, false, smlData, keyHashTable, info); ret = parseSmlKvPairs(&smlData->tags, &smlData->tagNum, &idx, false, smlData, keyHashTable, info);
if (ret) { if (ret) {
tscError("SML:0x%"PRIx64" Unable to parse tag", info->id); tscError("SML:0x%"PRIx64" Unable to parse tag", info->id);
taosHashCleanup(keyHashTable); taosHashCleanup(keyHashTable);
...@@ -2601,7 +2601,7 @@ int32_t tscParseLine(const char* sql, TAOS_SML_DATA_POINT* smlData, SSmlLinesInf ...@@ -2601,7 +2601,7 @@ int32_t tscParseLine(const char* sql, TAOS_SML_DATA_POINT* smlData, SSmlLinesInf
tscDebug("SML:0x%"PRIx64" Parse tags finished, num of tags:%d", info->id, smlData->tagNum); tscDebug("SML:0x%"PRIx64" Parse tags finished, num of tags:%d", info->id, smlData->tagNum);
//Parse fields //Parse fields
ret = parseSmlKvPairs(&smlData->fields, &smlData->fieldNum, &index, true, smlData, keyHashTable, info); ret = parseSmlKvPairs(&smlData->fields, &smlData->fieldNum, &idx, true, smlData, keyHashTable, info);
if (ret) { if (ret) {
tscError("SML:0x%"PRIx64" Unable to parse field", info->id); tscError("SML:0x%"PRIx64" Unable to parse field", info->id);
taosHashCleanup(keyHashTable); taosHashCleanup(keyHashTable);
...@@ -2611,7 +2611,7 @@ int32_t tscParseLine(const char* sql, TAOS_SML_DATA_POINT* smlData, SSmlLinesInf ...@@ -2611,7 +2611,7 @@ int32_t tscParseLine(const char* sql, TAOS_SML_DATA_POINT* smlData, SSmlLinesInf
taosHashCleanup(keyHashTable); taosHashCleanup(keyHashTable);
//Parse timestamp //Parse timestamp
ret = parseSmlTimeStamp(&timestamp, &index, info); ret = parseSmlTimeStamp(&timestamp, &idx, info);
if (ret) { if (ret) {
tscError("SML:0x%"PRIx64" Unable to parse timestamp", info->id); tscError("SML:0x%"PRIx64" Unable to parse timestamp", info->id);
return ret; return ret;
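/* Editor's sketch (hypothetical values, not from the patch): one InfluxDB-style
 * schemaless line in the order tscParseLine() consumes it above -- measurement,
 * optional tags, fields, then the timestamp at the end. The value type suffix
 * (i64 for BIGINT) is assumed from parseSmlValue()'s handling, not verified. */
static const char *sml_line_example =
    "meters,location=SanFrancisco,groupid=2 current=10.3,voltage=219i64 1626006833639000000";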
......
...@@ -33,8 +33,8 @@ static uint64_t genUID() { ...@@ -33,8 +33,8 @@ static uint64_t genUID() {
return id; return id;
} }
static int32_t parseTelnetMetric(TAOS_SML_DATA_POINT *pSml, const char **index, SSmlLinesInfo* info) { static int32_t parseTelnetMetric(TAOS_SML_DATA_POINT *pSml, const char **idx, SSmlLinesInfo* info) {
const char *cur = *index; const char *cur = *idx;
uint16_t len = 0; uint16_t len = 0;
pSml->stableName = tcalloc(TSDB_TABLE_NAME_LEN + TS_BACKQUOTE_CHAR_SIZE, 1); pSml->stableName = tcalloc(TSDB_TABLE_NAME_LEN + TS_BACKQUOTE_CHAR_SIZE, 1);
...@@ -76,13 +76,13 @@ static int32_t parseTelnetMetric(TAOS_SML_DATA_POINT *pSml, const char **index, ...@@ -76,13 +76,13 @@ static int32_t parseTelnetMetric(TAOS_SML_DATA_POINT *pSml, const char **index,
} }
addEscapeCharToString(pSml->stableName, len); addEscapeCharToString(pSml->stableName, len);
*index = cur + 1; *idx = cur + 1;
tscDebug("OTD:0x%"PRIx64" Stable name in metric:%s|len:%d", info->id, pSml->stableName, len); tscDebug("OTD:0x%"PRIx64" Stable name in metric:%s|len:%d", info->id, pSml->stableName, len);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
static int32_t parseTelnetTimeStamp(TAOS_SML_KV **pTS, int *num_kvs, const char **index, SSmlLinesInfo* info) { static int32_t parseTelnetTimeStamp(TAOS_SML_KV **pTS, int *num_kvs, const char **idx, SSmlLinesInfo* info) {
//Timestamp must be the first KV to parse //Timestamp must be the first KV to parse
assert(*num_kvs == 0); assert(*num_kvs == 0);
...@@ -92,7 +92,7 @@ static int32_t parseTelnetTimeStamp(TAOS_SML_KV **pTS, int *num_kvs, const char ...@@ -92,7 +92,7 @@ static int32_t parseTelnetTimeStamp(TAOS_SML_KV **pTS, int *num_kvs, const char
char key[] = OTD_TIMESTAMP_COLUMN_NAME; char key[] = OTD_TIMESTAMP_COLUMN_NAME;
char *value = NULL; char *value = NULL;
start = cur = *index; start = cur = *idx;
//allocate fields for timestamp and value //allocate fields for timestamp and value
*pTS = tcalloc(OTD_MAX_FIELDS_NUM, sizeof(TAOS_SML_KV)); *pTS = tcalloc(OTD_MAX_FIELDS_NUM, sizeof(TAOS_SML_KV));
...@@ -130,12 +130,12 @@ static int32_t parseTelnetTimeStamp(TAOS_SML_KV **pTS, int *num_kvs, const char ...@@ -130,12 +130,12 @@ static int32_t parseTelnetTimeStamp(TAOS_SML_KV **pTS, int *num_kvs, const char
addEscapeCharToString((*pTS)->key, (int32_t)strlen(key)); addEscapeCharToString((*pTS)->key, (int32_t)strlen(key));
*num_kvs += 1; *num_kvs += 1;
*index = cur + 1; *idx = cur + 1;
return ret; return ret;
} }
static int32_t parseTelnetMetricValue(TAOS_SML_KV **pKVs, int *num_kvs, const char **index, SSmlLinesInfo* info) { static int32_t parseTelnetMetricValue(TAOS_SML_KV **pKVs, int *num_kvs, const char **idx, SSmlLinesInfo* info) {
//skip timestamp //skip timestamp
TAOS_SML_KV *pVal = *pKVs + 1; TAOS_SML_KV *pVal = *pKVs + 1;
const char *start, *cur; const char *start, *cur;
...@@ -145,7 +145,7 @@ static int32_t parseTelnetMetricValue(TAOS_SML_KV **pKVs, int *num_kvs, const ch ...@@ -145,7 +145,7 @@ static int32_t parseTelnetMetricValue(TAOS_SML_KV **pKVs, int *num_kvs, const ch
char key[] = OTD_METRIC_VALUE_COLUMN_NAME; char key[] = OTD_METRIC_VALUE_COLUMN_NAME;
char *value = NULL; char *value = NULL;
start = cur = *index; start = cur = *idx;
//if metric value is string //if metric value is string
if (*cur == '"') { if (*cur == '"') {
...@@ -201,12 +201,12 @@ static int32_t parseTelnetMetricValue(TAOS_SML_KV **pKVs, int *num_kvs, const ch ...@@ -201,12 +201,12 @@ static int32_t parseTelnetMetricValue(TAOS_SML_KV **pKVs, int *num_kvs, const ch
addEscapeCharToString(pVal->key, (int32_t)strlen(pVal->key)); addEscapeCharToString(pVal->key, (int32_t)strlen(pVal->key));
*num_kvs += 1; *num_kvs += 1;
*index = cur + 1; *idx = cur + 1;
return ret; return ret;
} }
static int32_t parseTelnetTagKey(TAOS_SML_KV *pKV, const char **index, SHashObj *pHash, SSmlLinesInfo* info) { static int32_t parseTelnetTagKey(TAOS_SML_KV *pKV, const char **idx, SHashObj *pHash, SSmlLinesInfo* info) {
const char *cur = *index; const char *cur = *idx;
char key[TSDB_COL_NAME_LEN]; char key[TSDB_COL_NAME_LEN];
uint16_t len = 0; uint16_t len = 0;
...@@ -244,17 +244,17 @@ static int32_t parseTelnetTagKey(TAOS_SML_KV *pKV, const char **index, SHashObj ...@@ -244,17 +244,17 @@ static int32_t parseTelnetTagKey(TAOS_SML_KV *pKV, const char **index, SHashObj
memcpy(pKV->key, key, len + 1); memcpy(pKV->key, key, len + 1);
addEscapeCharToString(pKV->key, len); addEscapeCharToString(pKV->key, len);
//tscDebug("OTD:0x%"PRIx64" Key:%s|len:%d", info->id, pKV->key, len); //tscDebug("OTD:0x%"PRIx64" Key:%s|len:%d", info->id, pKV->key, len);
*index = cur + 1; *idx = cur + 1;
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
static int32_t parseTelnetTagValue(TAOS_SML_KV *pKV, const char **index, static int32_t parseTelnetTagValue(TAOS_SML_KV *pKV, const char **idx,
bool *is_last_kv, SSmlLinesInfo* info) { bool *is_last_kv, SSmlLinesInfo* info) {
const char *start, *cur; const char *start, *cur;
char *value = NULL; char *value = NULL;
uint16_t len = 0; uint16_t len = 0;
start = cur = *index; start = cur = *idx;
while (1) { while (1) {
// whitespace or '\0' identifies a value // whitespace or '\0' identifies a value
...@@ -290,14 +290,14 @@ static int32_t parseTelnetTagValue(TAOS_SML_KV *pKV, const char **index, ...@@ -290,14 +290,14 @@ static int32_t parseTelnetTagValue(TAOS_SML_KV *pKV, const char **index,
} }
tfree(value); tfree(value);
*index = (*cur == '\0') ? cur : cur + 1; *idx = (*cur == '\0') ? cur : cur + 1;
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
static int32_t parseTelnetTagKvs(TAOS_SML_KV **pKVs, int *num_kvs, static int32_t parseTelnetTagKvs(TAOS_SML_KV **pKVs, int *num_kvs,
const char **index, char **childTableName, const char **idx, char **childTableName,
SHashObj *pHash, SSmlLinesInfo* info) { SHashObj *pHash, SSmlLinesInfo* info) {
const char *cur = *index; const char *cur = *idx;
int32_t ret = TSDB_CODE_SUCCESS; int32_t ret = TSDB_CODE_SUCCESS;
TAOS_SML_KV *pkv; TAOS_SML_KV *pkv;
bool is_last_kv = false; bool is_last_kv = false;
...@@ -357,11 +357,11 @@ static int32_t parseTelnetTagKvs(TAOS_SML_KV **pKVs, int *num_kvs, ...@@ -357,11 +357,11 @@ static int32_t parseTelnetTagKvs(TAOS_SML_KV **pKVs, int *num_kvs,
} }
static int32_t tscParseTelnetLine(const char* line, TAOS_SML_DATA_POINT* smlData, SSmlLinesInfo* info) { static int32_t tscParseTelnetLine(const char* line, TAOS_SML_DATA_POINT* smlData, SSmlLinesInfo* info) {
const char* index = line; const char* idx = line;
int32_t ret = TSDB_CODE_SUCCESS; int32_t ret = TSDB_CODE_SUCCESS;
//Parse metric //Parse metric
ret = parseTelnetMetric(smlData, &index, info); ret = parseTelnetMetric(smlData, &idx, info);
if (ret) { if (ret) {
tscError("OTD:0x%"PRIx64" Unable to parse metric", info->id); tscError("OTD:0x%"PRIx64" Unable to parse metric", info->id);
return ret; return ret;
...@@ -369,7 +369,7 @@ static int32_t tscParseTelnetLine(const char* line, TAOS_SML_DATA_POINT* smlData ...@@ -369,7 +369,7 @@ static int32_t tscParseTelnetLine(const char* line, TAOS_SML_DATA_POINT* smlData
tscDebug("OTD:0x%"PRIx64" Parse metric finished", info->id); tscDebug("OTD:0x%"PRIx64" Parse metric finished", info->id);
//Parse timestamp //Parse timestamp
ret = parseTelnetTimeStamp(&smlData->fields, &smlData->fieldNum, &index, info); ret = parseTelnetTimeStamp(&smlData->fields, &smlData->fieldNum, &idx, info);
if (ret) { if (ret) {
tscError("OTD:0x%"PRIx64" Unable to parse timestamp", info->id); tscError("OTD:0x%"PRIx64" Unable to parse timestamp", info->id);
return ret; return ret;
...@@ -377,7 +377,7 @@ static int32_t tscParseTelnetLine(const char* line, TAOS_SML_DATA_POINT* smlData ...@@ -377,7 +377,7 @@ static int32_t tscParseTelnetLine(const char* line, TAOS_SML_DATA_POINT* smlData
tscDebug("OTD:0x%"PRIx64" Parse timestamp finished", info->id); tscDebug("OTD:0x%"PRIx64" Parse timestamp finished", info->id);
//Parse value //Parse value
ret = parseTelnetMetricValue(&smlData->fields, &smlData->fieldNum, &index, info); ret = parseTelnetMetricValue(&smlData->fields, &smlData->fieldNum, &idx, info);
if (ret) { if (ret) {
tscError("OTD:0x%"PRIx64" Unable to parse metric value", info->id); tscError("OTD:0x%"PRIx64" Unable to parse metric value", info->id);
return ret; return ret;
...@@ -386,7 +386,7 @@ static int32_t tscParseTelnetLine(const char* line, TAOS_SML_DATA_POINT* smlData ...@@ -386,7 +386,7 @@ static int32_t tscParseTelnetLine(const char* line, TAOS_SML_DATA_POINT* smlData
//Parse tagKVs //Parse tagKVs
SHashObj *keyHashTable = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false); SHashObj *keyHashTable = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false);
ret = parseTelnetTagKvs(&smlData->tags, &smlData->tagNum, &index, &smlData->childTableName, keyHashTable, info); ret = parseTelnetTagKvs(&smlData->tags, &smlData->tagNum, &idx, &smlData->childTableName, keyHashTable, info);
if (ret) { if (ret) {
tscError("OTD:0x%"PRIx64" Unable to parse tags", info->id); tscError("OTD:0x%"PRIx64" Unable to parse tags", info->id);
taosHashCleanup(keyHashTable); taosHashCleanup(keyHashTable);
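/* Editor's sketch (hypothetical metric and tag names, not from the patch): one
 * OpenTSDB telnet-style line in the order tscParseTelnetLine() consumes it
 * above -- metric, timestamp, value, then tag key=value pairs. */
static const char *telnet_line_example =
    "sys.cpu.usage 1626006833 25.3 host=web01 cpu=0";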
......
...@@ -121,11 +121,11 @@ static int normalStmtAddPart(SNormalStmt* stmt, bool isParam, char* str, uint32_ ...@@ -121,11 +121,11 @@ static int normalStmtAddPart(SNormalStmt* stmt, bool isParam, char* str, uint32_
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
static int normalStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) { static int normalStmtBindParam(STscStmt* stmt, TAOS_BIND* pBind) {
SNormalStmt* normal = &stmt->normal; SNormalStmt* normal = &stmt->normal;
for (uint16_t i = 0; i < normal->numParams; ++i) { for (uint16_t i = 0; i < normal->numParams; ++i) {
TAOS_BIND* tb = bind + i; TAOS_BIND* tb = pBind + i;
tVariant* var = normal->params + i; tVariant* var = normal->params + i;
tVariantDestroy(var); tVariantDestroy(var);
...@@ -383,8 +383,8 @@ int32_t fillTablesColumnsNull(SSqlObj* pSql) { ...@@ -383,8 +383,8 @@ int32_t fillTablesColumnsNull(SSqlObj* pSql) {
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
// functions for insertion statement preparation // functions for insertion statement preparation
static FORCE_INLINE int doBindParam(STableDataBlocks* pBlock, char* data, SParamInfo* param, TAOS_BIND* bind, int32_t colNum) { static FORCE_INLINE int doBindParam(STableDataBlocks* pBlock, char* data, SParamInfo* param, TAOS_BIND* pBind, int32_t colNum) {
if (bind->is_null != NULL && *(bind->is_null)) { if (pBind->is_null != NULL && *(pBind->is_null)) {
setNull(data + param->offset, param->type, param->bytes); setNull(data + param->offset, param->type, param->bytes);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
...@@ -772,7 +772,7 @@ static FORCE_INLINE int doBindParam(STableDataBlocks* pBlock, char* data, SParam ...@@ -772,7 +772,7 @@ static FORCE_INLINE int doBindParam(STableDataBlocks* pBlock, char* data, SParam
} }
#endif #endif
if (bind->buffer_type != param->type) { if (pBind->buffer_type != param->type) {
tscError("column type mismatch"); tscError("column type mismatch");
return TSDB_CODE_TSC_INVALID_VALUE; return TSDB_CODE_TSC_INVALID_VALUE;
} }
...@@ -782,39 +782,39 @@ static FORCE_INLINE int doBindParam(STableDataBlocks* pBlock, char* data, SParam ...@@ -782,39 +782,39 @@ static FORCE_INLINE int doBindParam(STableDataBlocks* pBlock, char* data, SParam
case TSDB_DATA_TYPE_BOOL: case TSDB_DATA_TYPE_BOOL:
case TSDB_DATA_TYPE_TINYINT: case TSDB_DATA_TYPE_TINYINT:
case TSDB_DATA_TYPE_UTINYINT: case TSDB_DATA_TYPE_UTINYINT:
*(uint8_t *)(data + param->offset) = *(uint8_t *)bind->buffer; *(uint8_t *)(data + param->offset) = *(uint8_t *)pBind->buffer;
break; break;
case TSDB_DATA_TYPE_SMALLINT: case TSDB_DATA_TYPE_SMALLINT:
case TSDB_DATA_TYPE_USMALLINT: case TSDB_DATA_TYPE_USMALLINT:
*(uint16_t *)(data + param->offset) = *(uint16_t *)bind->buffer; *(uint16_t *)(data + param->offset) = *(uint16_t *)pBind->buffer;
break; break;
case TSDB_DATA_TYPE_INT: case TSDB_DATA_TYPE_INT:
case TSDB_DATA_TYPE_UINT: case TSDB_DATA_TYPE_UINT:
case TSDB_DATA_TYPE_FLOAT: case TSDB_DATA_TYPE_FLOAT:
*(uint32_t *)(data + param->offset) = *(uint32_t *)bind->buffer; *(uint32_t *)(data + param->offset) = *(uint32_t *)pBind->buffer;
break; break;
case TSDB_DATA_TYPE_BIGINT: case TSDB_DATA_TYPE_BIGINT:
case TSDB_DATA_TYPE_UBIGINT: case TSDB_DATA_TYPE_UBIGINT:
case TSDB_DATA_TYPE_DOUBLE: case TSDB_DATA_TYPE_DOUBLE:
case TSDB_DATA_TYPE_TIMESTAMP: case TSDB_DATA_TYPE_TIMESTAMP:
*(uint64_t *)(data + param->offset) = *(uint64_t *)bind->buffer; *(uint64_t *)(data + param->offset) = *(uint64_t *)pBind->buffer;
break; break;
case TSDB_DATA_TYPE_BINARY: case TSDB_DATA_TYPE_BINARY:
if ((*bind->length) > (uintptr_t)param->bytes) { if ((*pBind->length) > (uintptr_t)param->bytes) {
tscError("column length is too big"); tscError("column length is too big");
return TSDB_CODE_TSC_INVALID_VALUE; return TSDB_CODE_TSC_INVALID_VALUE;
} }
size = (short)*bind->length; size = (short)*pBind->length;
STR_WITH_SIZE_TO_VARSTR(data + param->offset, bind->buffer, size); STR_WITH_SIZE_TO_VARSTR(data + param->offset, pBind->buffer, size);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
case TSDB_DATA_TYPE_NCHAR: { case TSDB_DATA_TYPE_NCHAR: {
int32_t output = 0; int32_t output = 0;
if (!taosMbsToUcs4(bind->buffer, *bind->length, varDataVal(data + param->offset), param->bytes - VARSTR_HEADER_SIZE, &output)) { if (!taosMbsToUcs4(pBind->buffer, *pBind->length, varDataVal(data + param->offset), param->bytes - VARSTR_HEADER_SIZE, &output)) {
tscError("convert nchar failed"); tscError("convert nchar failed");
return TSDB_CODE_TSC_INVALID_VALUE; return TSDB_CODE_TSC_INVALID_VALUE;
} }
...@@ -889,27 +889,27 @@ static int32_t insertStmtGenBlock(STscStmt* pStmt, STableDataBlocks** pBlock, ST ...@@ -889,27 +889,27 @@ static int32_t insertStmtGenBlock(STscStmt* pStmt, STableDataBlocks** pBlock, ST
} }
static int doBindBatchParam(STableDataBlocks* pBlock, SParamInfo* param, TAOS_MULTI_BIND* bind, int32_t rowNum) { static int doBindBatchParam(STableDataBlocks* pBlock, SParamInfo* param, TAOS_MULTI_BIND* pBind, int32_t rowNum) {
if (bind->buffer_type != param->type || !isValidDataType(param->type)) { if (pBind->buffer_type != param->type || !isValidDataType(param->type)) {
tscError("column mismatch or invalid"); tscError("column mismatch or invalid");
return TSDB_CODE_TSC_INVALID_VALUE; return TSDB_CODE_TSC_INVALID_VALUE;
} }
if (IS_VAR_DATA_TYPE(param->type) && bind->length == NULL) { if (IS_VAR_DATA_TYPE(param->type) && pBind->length == NULL) {
tscError("BINARY/NCHAR no length"); tscError("BINARY/NCHAR no length");
return TSDB_CODE_TSC_INVALID_VALUE; return TSDB_CODE_TSC_INVALID_VALUE;
} }
for (int i = 0; i < bind->num; ++i) { for (int i = 0; i < pBind->num; ++i) {
char* data = pBlock->pData + sizeof(SSubmitBlk) + pBlock->rowSize * (rowNum + i); char* data = pBlock->pData + sizeof(SSubmitBlk) + pBlock->rowSize * (rowNum + i);
if (bind->is_null != NULL && bind->is_null[i]) { if (pBind->is_null != NULL && pBind->is_null[i]) {
setNull(data + param->offset, param->type, param->bytes); setNull(data + param->offset, param->type, param->bytes);
continue; continue;
} }
if (!IS_VAR_DATA_TYPE(param->type)) { if (!IS_VAR_DATA_TYPE(param->type)) {
memcpy(data + param->offset, (char *)bind->buffer + bind->buffer_length * i, tDataTypes[param->type].bytes); memcpy(data + param->offset, (char *)pBind->buffer + pBind->buffer_length * i, tDataTypes[param->type].bytes);
if (param->offset == 0) { if (param->offset == 0) {
if (tsCheckTimestamp(pBlock, data + param->offset) != TSDB_CODE_SUCCESS) { if (tsCheckTimestamp(pBlock, data + param->offset) != TSDB_CODE_SUCCESS) {
...@@ -918,21 +918,21 @@ static int doBindBatchParam(STableDataBlocks* pBlock, SParamInfo* param, TAOS_MU ...@@ -918,21 +918,21 @@ static int doBindBatchParam(STableDataBlocks* pBlock, SParamInfo* param, TAOS_MU
} }
} }
} else if (param->type == TSDB_DATA_TYPE_BINARY) { } else if (param->type == TSDB_DATA_TYPE_BINARY) {
if (bind->length[i] > (uintptr_t)param->bytes) { if (pBind->length[i] > (uintptr_t)param->bytes) {
tscError("binary length too long, ignore it, max:%d, actual:%d", param->bytes, (int32_t)bind->length[i]); tscError("binary length too long, ignore it, max:%d, actual:%d", param->bytes, (int32_t)pBind->length[i]);
return TSDB_CODE_TSC_INVALID_VALUE; return TSDB_CODE_TSC_INVALID_VALUE;
} }
int16_t bsize = (short)bind->length[i]; int16_t bsize = (short)pBind->length[i];
STR_WITH_SIZE_TO_VARSTR(data + param->offset, (char *)bind->buffer + bind->buffer_length * i, bsize); STR_WITH_SIZE_TO_VARSTR(data + param->offset, (char *)pBind->buffer + pBind->buffer_length * i, bsize);
} else if (param->type == TSDB_DATA_TYPE_NCHAR) { } else if (param->type == TSDB_DATA_TYPE_NCHAR) {
if (bind->length[i] > (uintptr_t)param->bytes) { if (pBind->length[i] > (uintptr_t)param->bytes) {
tscError("nchar string length too long, ignore it, max:%d, actual:%d", param->bytes, (int32_t)bind->length[i]); tscError("nchar string length too long, ignore it, max:%d, actual:%d", param->bytes, (int32_t)pBind->length[i]);
return TSDB_CODE_TSC_INVALID_VALUE; return TSDB_CODE_TSC_INVALID_VALUE;
} }
int32_t output = 0; int32_t output = 0;
if (!taosMbsToUcs4((char *)bind->buffer + bind->buffer_length * i, bind->length[i], varDataVal(data + param->offset), param->bytes - VARSTR_HEADER_SIZE, &output)) { if (!taosMbsToUcs4((char *)pBind->buffer + pBind->buffer_length * i, pBind->length[i], varDataVal(data + param->offset), param->bytes - VARSTR_HEADER_SIZE, &output)) {
tscError("convert nchar string to UCS4_LE failed:%s", (char*)((char *)bind->buffer + bind->buffer_length * i)); tscError("convert nchar string to UCS4_LE failed:%s", (char*)((char *)pBind->buffer + pBind->buffer_length * i));
return TSDB_CODE_TSC_INVALID_VALUE; return TSDB_CODE_TSC_INVALID_VALUE;
} }
...@@ -943,7 +943,7 @@ static int doBindBatchParam(STableDataBlocks* pBlock, SParamInfo* param, TAOS_MU ...@@ -943,7 +943,7 @@ static int doBindBatchParam(STableDataBlocks* pBlock, SParamInfo* param, TAOS_MU
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) { static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* pBind) {
SSqlCmd* pCmd = &stmt->pSql->cmd; SSqlCmd* pCmd = &stmt->pSql->cmd;
STscStmt* pStmt = (STscStmt*)stmt; STscStmt* pStmt = (STscStmt*)stmt;
...@@ -995,7 +995,7 @@ static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) { ...@@ -995,7 +995,7 @@ static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) {
for (uint32_t j = 0; j < pBlock->numOfParams; ++j) { for (uint32_t j = 0; j < pBlock->numOfParams; ++j) {
SParamInfo* param = &pBlock->params[j]; SParamInfo* param = &pBlock->params[j];
int code = doBindParam(pBlock, data, param, &bind[param->idx], 1); int code = doBindParam(pBlock, data, param, &pBind[param->idx], 1);
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
tscDebug("0x%"PRIx64" bind column %d: type mismatch or invalid", pStmt->pSql->self, param->idx); tscDebug("0x%"PRIx64" bind column %d: type mismatch or invalid", pStmt->pSql->self, param->idx);
return invalidOperationMsg(tscGetErrorMsgPayload(&stmt->pSql->cmd), "bind column type mismatch or invalid"); return invalidOperationMsg(tscGetErrorMsgPayload(&stmt->pSql->cmd), "bind column type mismatch or invalid");
...@@ -1006,10 +1006,10 @@ static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) { ...@@ -1006,10 +1006,10 @@ static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) {
} }
static int insertStmtBindParamBatch(STscStmt* stmt, TAOS_MULTI_BIND* bind, int colIdx) { static int insertStmtBindParamBatch(STscStmt* stmt, TAOS_MULTI_BIND* pBind, int colIdx) {
SSqlCmd* pCmd = &stmt->pSql->cmd; SSqlCmd* pCmd = &stmt->pSql->cmd;
STscStmt* pStmt = (STscStmt*)stmt; STscStmt* pStmt = (STscStmt*)stmt;
int rowNum = bind->num; int rowNum = pBind->num;
STableDataBlocks* pBlock = NULL; STableDataBlocks* pBlock = NULL;
...@@ -1063,12 +1063,12 @@ static int insertStmtBindParamBatch(STscStmt* stmt, TAOS_MULTI_BIND* bind, int c ...@@ -1063,12 +1063,12 @@ static int insertStmtBindParamBatch(STscStmt* stmt, TAOS_MULTI_BIND* bind, int c
if (colIdx == -1) { if (colIdx == -1) {
for (uint32_t j = 0; j < pBlock->numOfParams; ++j) { for (uint32_t j = 0; j < pBlock->numOfParams; ++j) {
SParamInfo* param = &pBlock->params[j]; SParamInfo* param = &pBlock->params[j];
if (bind[param->idx].num != rowNum) { if (pBind[param->idx].num != rowNum) {
tscError("0x%"PRIx64" param %d: num[%d:%d] not match", pStmt->pSql->self, param->idx, rowNum, bind[param->idx].num); tscError("0x%"PRIx64" param %d: num[%d:%d] not match", pStmt->pSql->self, param->idx, rowNum, pBind[param->idx].num);
return invalidOperationMsg(tscGetErrorMsgPayload(&stmt->pSql->cmd), "bind row num mismatch"); return invalidOperationMsg(tscGetErrorMsgPayload(&stmt->pSql->cmd), "bind row num mismatch");
} }
int code = doBindBatchParam(pBlock, param, &bind[param->idx], pCmd->batchSize); int code = doBindBatchParam(pBlock, param, &pBind[param->idx], pCmd->batchSize);
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
tscError("0x%"PRIx64" bind column %d: type mismatch or invalid", pStmt->pSql->self, param->idx); tscError("0x%"PRIx64" bind column %d: type mismatch or invalid", pStmt->pSql->self, param->idx);
return invalidOperationMsg(tscGetErrorMsgPayload(&stmt->pSql->cmd), "bind column type mismatch or invalid"); return invalidOperationMsg(tscGetErrorMsgPayload(&stmt->pSql->cmd), "bind column type mismatch or invalid");
...@@ -1079,7 +1079,7 @@ static int insertStmtBindParamBatch(STscStmt* stmt, TAOS_MULTI_BIND* bind, int c ...@@ -1079,7 +1079,7 @@ static int insertStmtBindParamBatch(STscStmt* stmt, TAOS_MULTI_BIND* bind, int c
} else { } else {
SParamInfo* param = &pBlock->params[colIdx]; SParamInfo* param = &pBlock->params[colIdx];
int code = doBindBatchParam(pBlock, param, bind, pCmd->batchSize); int code = doBindBatchParam(pBlock, param, pBind, pCmd->batchSize);
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
tscError("0x%"PRIx64" bind column %d: type mismatch or invalid", pStmt->pSql->self, param->idx); tscError("0x%"PRIx64" bind column %d: type mismatch or invalid", pStmt->pSql->self, param->idx);
return invalidOperationMsg(tscGetErrorMsgPayload(&stmt->pSql->cmd), "bind column type mismatch or invalid"); return invalidOperationMsg(tscGetErrorMsgPayload(&stmt->pSql->cmd), "bind column type mismatch or invalid");
...@@ -1312,8 +1312,8 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) { ...@@ -1312,8 +1312,8 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
return ret; return ret;
} }
int32_t index = 0; int32_t idx = 0;
SStrToken sToken = tStrGetToken(pCmd->insertParam.sql, &index, false); SStrToken sToken = tStrGetToken(pCmd->insertParam.sql, &idx, false);
if (sToken.n == 0) { if (sToken.n == 0) {
tscError("table is is expected, sql:%s", pCmd->insertParam.sql); tscError("table is is expected, sql:%s", pCmd->insertParam.sql);
return tscSQLSyntaxErrMsg(pCmd->payload, "table name is expected", pCmd->insertParam.sql); return tscSQLSyntaxErrMsg(pCmd->payload, "table name is expected", pCmd->insertParam.sql);
...@@ -1333,7 +1333,7 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) { ...@@ -1333,7 +1333,7 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
pStmt->mtb.tagSet = true; pStmt->mtb.tagSet = true;
sToken = tStrGetToken(pCmd->insertParam.sql, &index, false); sToken = tStrGetToken(pCmd->insertParam.sql, &idx, false);
if (sToken.n > 0 && (sToken.type == TK_VALUES || sToken.type == TK_LP)) { if (sToken.n > 0 && (sToken.type == TK_VALUES || sToken.type == TK_LP)) {
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
...@@ -1343,14 +1343,14 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) { ...@@ -1343,14 +1343,14 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
return tscSQLSyntaxErrMsg(pCmd->payload, "keywords USING is expected", sToken.z ? sToken.z : pCmd->insertParam.sql); return tscSQLSyntaxErrMsg(pCmd->payload, "keywords USING is expected", sToken.z ? sToken.z : pCmd->insertParam.sql);
} }
sToken = tStrGetToken(pCmd->insertParam.sql, &index, false); sToken = tStrGetToken(pCmd->insertParam.sql, &idx, false);
if (sToken.n <= 0 || ((sToken.type != TK_ID) && (sToken.type != TK_STRING))) { if (sToken.n <= 0 || ((sToken.type != TK_ID) && (sToken.type != TK_STRING))) {
tscError("invalid token, sql:%s", pCmd->insertParam.sql); tscError("invalid token, sql:%s", pCmd->insertParam.sql);
return tscSQLSyntaxErrMsg(pCmd->payload, "invalid token", sToken.z ? sToken.z : pCmd->insertParam.sql); return tscSQLSyntaxErrMsg(pCmd->payload, "invalid token", sToken.z ? sToken.z : pCmd->insertParam.sql);
} }
pStmt->mtb.stbname = sToken; pStmt->mtb.stbname = sToken;
sToken = tStrGetToken(pCmd->insertParam.sql, &index, false); sToken = tStrGetToken(pCmd->insertParam.sql, &idx, false);
if (sToken.n <= 0 || ((sToken.type != TK_TAGS) && (sToken.type != TK_LP))) { if (sToken.n <= 0 || ((sToken.type != TK_TAGS) && (sToken.type != TK_LP))) {
tscError("invalid token, sql:%s", pCmd->insertParam.sql); tscError("invalid token, sql:%s", pCmd->insertParam.sql);
return tscSQLSyntaxErrMsg(pCmd->payload, "invalid token", sToken.z ? sToken.z : pCmd->insertParam.sql); return tscSQLSyntaxErrMsg(pCmd->payload, "invalid token", sToken.z ? sToken.z : pCmd->insertParam.sql);
...@@ -1361,9 +1361,9 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) { ...@@ -1361,9 +1361,9 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
if (sToken.type == TK_LP) { if (sToken.type == TK_LP) {
pStmt->mtb.tagColSet = true; pStmt->mtb.tagColSet = true;
pStmt->mtb.tagCols = sToken; pStmt->mtb.tagCols = sToken;
int32_t tagColsStart = index; int32_t tagColsStart = idx;
while (1) { while (1) {
sToken = tStrGetToken(pCmd->insertParam.sql, &index, false); sToken = tStrGetToken(pCmd->insertParam.sql, &idx, false);
if (sToken.type == TK_ILLEGAL) { if (sToken.type == TK_ILLEGAL) {
return tscSQLSyntaxErrMsg(pCmd->payload, "unrecognized token", sToken.z); return tscSQLSyntaxErrMsg(pCmd->payload, "unrecognized token", sToken.z);
} }
...@@ -1378,16 +1378,16 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) { ...@@ -1378,16 +1378,16 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
tscError("tag column list expected, sql:%s", pCmd->insertParam.sql); tscError("tag column list expected, sql:%s", pCmd->insertParam.sql);
return tscSQLSyntaxErrMsg(pCmd->payload, "tag column list expected", pCmd->insertParam.sql); return tscSQLSyntaxErrMsg(pCmd->payload, "tag column list expected", pCmd->insertParam.sql);
} }
pStmt->mtb.tagCols.n = index - tagColsStart + 1; pStmt->mtb.tagCols.n = idx - tagColsStart + 1;
sToken = tStrGetToken(pCmd->insertParam.sql, &index, false); sToken = tStrGetToken(pCmd->insertParam.sql, &idx, false);
if (sToken.n <= 0 || sToken.type != TK_TAGS) { if (sToken.n <= 0 || sToken.type != TK_TAGS) {
tscError("keyword TAGS expected, sql:%s", pCmd->insertParam.sql); tscError("keyword TAGS expected, sql:%s", pCmd->insertParam.sql);
return tscSQLSyntaxErrMsg(pCmd->payload, "keyword TAGS expected", sToken.z ? sToken.z : pCmd->insertParam.sql); return tscSQLSyntaxErrMsg(pCmd->payload, "keyword TAGS expected", sToken.z ? sToken.z : pCmd->insertParam.sql);
} }
} }
sToken = tStrGetToken(pCmd->insertParam.sql, &index, false); sToken = tStrGetToken(pCmd->insertParam.sql, &idx, false);
if (sToken.n <= 0 || sToken.type != TK_LP) { if (sToken.n <= 0 || sToken.type != TK_LP) {
tscError("( expected, sql:%s", pCmd->insertParam.sql); tscError("( expected, sql:%s", pCmd->insertParam.sql);
return tscSQLSyntaxErrMsg(pCmd->payload, "( expected", sToken.z ? sToken.z : pCmd->insertParam.sql); return tscSQLSyntaxErrMsg(pCmd->payload, "( expected", sToken.z ? sToken.z : pCmd->insertParam.sql);
...@@ -1398,7 +1398,7 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) { ...@@ -1398,7 +1398,7 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
int32_t loopCont = 1; int32_t loopCont = 1;
while (loopCont) { while (loopCont) {
sToken = tStrGetToken(pCmd->insertParam.sql, &index, false); sToken = tStrGetToken(pCmd->insertParam.sql, &idx, false);
if (sToken.n <= 0) { if (sToken.n <= 0) {
tscError("unexpected sql end, sql:%s", pCmd->insertParam.sql); tscError("unexpected sql end, sql:%s", pCmd->insertParam.sql);
return tscSQLSyntaxErrMsg(pCmd->payload, "unexpected sql end", pCmd->insertParam.sql); return tscSQLSyntaxErrMsg(pCmd->payload, "unexpected sql end", pCmd->insertParam.sql);
...@@ -1429,7 +1429,7 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) { ...@@ -1429,7 +1429,7 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
return tscSQLSyntaxErrMsg(pCmd->payload, "not match tags", pCmd->insertParam.sql); return tscSQLSyntaxErrMsg(pCmd->payload, "not match tags", pCmd->insertParam.sql);
} }
sToken = tStrGetToken(pCmd->insertParam.sql, &index, false); sToken = tStrGetToken(pCmd->insertParam.sql, &idx, false);
if (sToken.n <= 0 || (sToken.type != TK_VALUES && sToken.type != TK_LP)) { if (sToken.n <= 0 || (sToken.type != TK_VALUES && sToken.type != TK_LP)) {
tscError("sql error, sql:%s", pCmd->insertParam.sql); tscError("sql error, sql:%s", pCmd->insertParam.sql);
return tscSQLSyntaxErrMsg(pCmd->payload, "sql error", sToken.z ? sToken.z : pCmd->insertParam.sql); return tscSQLSyntaxErrMsg(pCmd->payload, "sql error", sToken.z ? sToken.z : pCmd->insertParam.sql);
...@@ -1944,7 +1944,7 @@ int taos_stmt_close(TAOS_STMT* stmt) { ...@@ -1944,7 +1944,7 @@ int taos_stmt_close(TAOS_STMT* stmt) {
STMT_RET(TSDB_CODE_SUCCESS); STMT_RET(TSDB_CODE_SUCCESS);
} }
int taos_stmt_bind_param(TAOS_STMT* stmt, TAOS_BIND* bind) { int taos_stmt_bind_param(TAOS_STMT* stmt, TAOS_BIND* pBind) {
STscStmt* pStmt = (STscStmt*)stmt; STscStmt* pStmt = (STscStmt*)stmt;
STMT_CHECK STMT_CHECK
...@@ -1965,18 +1965,18 @@ int taos_stmt_bind_param(TAOS_STMT* stmt, TAOS_BIND* bind) { ...@@ -1965,18 +1965,18 @@ int taos_stmt_bind_param(TAOS_STMT* stmt, TAOS_BIND* bind) {
tscDebug("tableId:%" PRIu64 ", try to bind one row", pStmt->mtb.currentUid); tscDebug("tableId:%" PRIu64 ", try to bind one row", pStmt->mtb.currentUid);
STMT_RET(insertStmtBindParam(pStmt, bind)); STMT_RET(insertStmtBindParam(pStmt, pBind));
} else { } else {
STMT_RET(normalStmtBindParam(pStmt, bind)); STMT_RET(normalStmtBindParam(pStmt, pBind));
} }
} }
int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind) { int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* pBind) {
STscStmt* pStmt = (STscStmt*)stmt; STscStmt* pStmt = (STscStmt*)stmt;
STMT_CHECK STMT_CHECK
if (bind == NULL || bind->num <= 0 || bind->num > INT16_MAX) { if (pBind == NULL || pBind->num <= 0 || pBind->num > INT16_MAX) {
tscError("0x%"PRIx64" invalid parameter", pStmt->pSql->self); tscError("0x%"PRIx64" invalid parameter", pStmt->pSql->self);
STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "invalid bind param")); STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "invalid bind param"));
} }
...@@ -2000,21 +2000,21 @@ int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind) { ...@@ -2000,21 +2000,21 @@ int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind) {
pStmt->last = STMT_BIND; pStmt->last = STMT_BIND;
STMT_RET(insertStmtBindParamBatch(pStmt, bind, -1)); STMT_RET(insertStmtBindParamBatch(pStmt, pBind, -1));
} }
int taos_stmt_bind_single_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, int colIdx) { int taos_stmt_bind_single_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* pBind, int colIdx) {
STscStmt* pStmt = (STscStmt*)stmt; STscStmt* pStmt = (STscStmt*)stmt;
STMT_CHECK STMT_CHECK
if (bind == NULL) { if (pBind == NULL) {
tscError("0x%" PRIx64 " invalid parameter: bind is NULL", pStmt->pSql->self); tscError("0x%" PRIx64 " invalid parameter: bind is NULL", pStmt->pSql->self);
STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "invalid bind param: bind is NULL")); STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "invalid bind param: bind is NULL"));
} }
if (bind->num <= 0 || bind->num > INT16_MAX) { if (pBind->num <= 0 || pBind->num > INT16_MAX) {
char errMsg[128]; char errMsg[128];
sprintf(errMsg, "invalid parameter: bind->num:%d out of range [0, %d)", bind->num, INT16_MAX); sprintf(errMsg, "invalid parameter: bind->num:%d out of range [0, %d)", pBind->num, INT16_MAX);
tscError("0x%" PRIx64 " %s", pStmt->pSql->self, errMsg); tscError("0x%" PRIx64 " %s", pStmt->pSql->self, errMsg);
STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), errMsg)); STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), errMsg));
} }
...@@ -2045,7 +2045,7 @@ int taos_stmt_bind_single_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, in ...@@ -2045,7 +2045,7 @@ int taos_stmt_bind_single_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, in
pStmt->last = STMT_BIND_COL; pStmt->last = STMT_BIND_COL;
STMT_RET(insertStmtBindParamBatch(pStmt, bind, colIdx)); STMT_RET(insertStmtBindParamBatch(pStmt, pBind, colIdx));
} }
int taos_stmt_add_batch(TAOS_STMT* stmt) { int taos_stmt_add_batch(TAOS_STMT* stmt) {
......
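For reference, a minimal caller-side sketch of the bind API touched above, assuming the TAOS_STMT / TAOS_MULTI_BIND declarations from taos.h (table name, values, and the helper are illustrative; return codes are not checked). It shows why pBind->num is range-checked: one TAOS_MULTI_BIND describes an entire column of num rows.

/* Sketch only: assumes the taos.h declarations for TAOS_STMT and
 * TAOS_MULTI_BIND; error handling omitted for brevity. */
#include <string.h>
#include <taos.h>

static int bind_two_rows(TAOS *conn) {
  int64_t ts[2]  = {1648791213000, 1648791214000};  /* ms-precision timestamps */
  int32_t val[2] = {10, 20};

  TAOS_MULTI_BIND cols[2];
  memset(cols, 0, sizeof(cols));
  cols[0].buffer_type   = TSDB_DATA_TYPE_TIMESTAMP;
  cols[0].buffer        = ts;
  cols[0].buffer_length = sizeof(int64_t);
  cols[0].num           = 2;                        /* rows carried by this bind */
  cols[1].buffer_type   = TSDB_DATA_TYPE_INT;
  cols[1].buffer        = val;
  cols[1].buffer_length = sizeof(int32_t);
  cols[1].num           = 2;

  TAOS_STMT *stmt = taos_stmt_init(conn);
  taos_stmt_prepare(stmt, "insert into t values(?, ?)", 0);
  taos_stmt_bind_param_batch(stmt, cols);  /* lands in insertStmtBindParamBatch(pStmt, pBind, -1) */
  taos_stmt_add_batch(stmt);
  taos_stmt_execute(stmt);
  return taos_stmt_close(stmt);
}

taos_stmt_bind_single_param_batch() is the per-column variant of the same call, which is why it funnels into insertStmtBindParamBatch() with an explicit colIdx.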
This diff has been collapsed.
...@@ -748,13 +748,13 @@ static char *doSerializeTableInfo(SQueryTableMsg *pQueryMsg, SSqlObj *pSql, STab ...@@ -748,13 +748,13 @@ static char *doSerializeTableInfo(SQueryTableMsg *pQueryMsg, SSqlObj *pSql, STab
int32_t vgId = -1; int32_t vgId = -1;
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
int32_t index = pTableMetaInfo->vgroupIndex; int32_t idx = pTableMetaInfo->vgroupIndex;
assert(index >= 0); assert(idx >= 0);
SVgroupMsg* pVgroupInfo = NULL; SVgroupMsg* pVgroupInfo = NULL;
if (pTableMetaInfo->vgroupList && pTableMetaInfo->vgroupList->numOfVgroups > 0) { if (pTableMetaInfo->vgroupList && pTableMetaInfo->vgroupList->numOfVgroups > 0) {
assert(index < pTableMetaInfo->vgroupList->numOfVgroups); assert(idx < pTableMetaInfo->vgroupList->numOfVgroups);
pVgroupInfo = &pTableMetaInfo->vgroupList->vgroups[index]; pVgroupInfo = &pTableMetaInfo->vgroupList->vgroups[idx];
} else { } else {
tscError("0x%"PRIx64" No vgroup info found", pSql->self); tscError("0x%"PRIx64" No vgroup info found", pSql->self);
...@@ -764,7 +764,7 @@ static char *doSerializeTableInfo(SQueryTableMsg *pQueryMsg, SSqlObj *pSql, STab ...@@ -764,7 +764,7 @@ static char *doSerializeTableInfo(SQueryTableMsg *pQueryMsg, SSqlObj *pSql, STab
vgId = pVgroupInfo->vgId; vgId = pVgroupInfo->vgId;
tscSetDnodeEpSet(&pSql->epSet, pVgroupInfo); tscSetDnodeEpSet(&pSql->epSet, pVgroupInfo);
tscDebug("0x%"PRIx64" query on stable, vgIndex:%d, numOfVgroups:%d", pSql->self, index, pTableMetaInfo->vgroupList->numOfVgroups); tscDebug("0x%"PRIx64" query on stable, vgIndex:%d, numOfVgroups:%d", pSql->self, idx, pTableMetaInfo->vgroupList->numOfVgroups);
} else { } else {
vgId = pTableMeta->vgId; vgId = pTableMeta->vgId;
...@@ -786,11 +786,11 @@ static char *doSerializeTableInfo(SQueryTableMsg *pQueryMsg, SSqlObj *pSql, STab ...@@ -786,11 +786,11 @@ static char *doSerializeTableInfo(SQueryTableMsg *pQueryMsg, SSqlObj *pSql, STab
pQueryMsg->numOfTables = htonl(1); // set the number of tables pQueryMsg->numOfTables = htonl(1); // set the number of tables
pMsg += sizeof(STableIdInfo); pMsg += sizeof(STableIdInfo);
} else { // it is a subquery of the super table query, this EP info is acquired from vgroupInfo } else { // it is a subquery of the super table query, this EP info is acquired from vgroupInfo
int32_t index = pTableMetaInfo->vgroupIndex; int32_t idx = pTableMetaInfo->vgroupIndex;
int32_t numOfVgroups = (int32_t)taosArrayGetSize(pTableMetaInfo->pVgroupTables); int32_t numOfVgroups = (int32_t)taosArrayGetSize(pTableMetaInfo->pVgroupTables);
assert(index >= 0 && index < numOfVgroups); assert(idx >= 0 && idx < numOfVgroups);
SVgroupTableInfo* pTableIdList = taosArrayGet(pTableMetaInfo->pVgroupTables, index); SVgroupTableInfo* pTableIdList = taosArrayGet(pTableMetaInfo->pVgroupTables, idx);
// set the vgroup info // set the vgroup info
tscSetDnodeEpSet(&pSql->epSet, &pTableIdList->vgInfo); tscSetDnodeEpSet(&pSql->epSet, &pTableIdList->vgInfo);
...@@ -800,7 +800,7 @@ static char *doSerializeTableInfo(SQueryTableMsg *pQueryMsg, SSqlObj *pSql, STab ...@@ -800,7 +800,7 @@ static char *doSerializeTableInfo(SQueryTableMsg *pQueryMsg, SSqlObj *pSql, STab
pQueryMsg->numOfTables = htonl(numOfTables); // set the number of tables pQueryMsg->numOfTables = htonl(numOfTables); // set the number of tables
tscDebug("0x%"PRIx64" query on stable, vgId:%d, numOfTables:%d, vgIndex:%d, numOfVgroups:%d", pSql->self, tscDebug("0x%"PRIx64" query on stable, vgId:%d, numOfTables:%d, vgIndex:%d, numOfVgroups:%d", pSql->self,
pTableIdList->vgInfo.vgId, numOfTables, index, numOfVgroups); pTableIdList->vgInfo.vgId, numOfTables, idx, numOfVgroups);
// serialize each table id info // serialize each table id info
for(int32_t i = 0; i < numOfTables; ++i) { for(int32_t i = 0; i < numOfTables; ++i) {
...@@ -1113,7 +1113,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { ...@@ -1113,7 +1113,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pQueryMsg->tsBuf.tsOffset = htonl((int32_t)(pMsg - pCmd->payload)); pQueryMsg->tsBuf.tsOffset = htonl((int32_t)(pMsg - pCmd->payload));
if (pQueryInfo->tsBuf != NULL) { if (pQueryInfo->tsBuf != NULL) {
// note: here used the index instead of actual vnode id. // note: here used the idx instead of actual vnode id.
int32_t vnodeIndex = pTableMetaInfo->vgroupIndex; int32_t vnodeIndex = pTableMetaInfo->vgroupIndex;
code = dumpFileBlockByGroupId(pQueryInfo->tsBuf, vnodeIndex, pMsg, &pQueryMsg->tsBuf.tsLen, &pQueryMsg->tsBuf.tsNumOfBlocks); code = dumpFileBlockByGroupId(pQueryInfo->tsBuf, vnodeIndex, pMsg, &pQueryMsg->tsBuf.tsLen, &pQueryMsg->tsBuf.tsNumOfBlocks);
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
...@@ -1258,10 +1258,10 @@ int32_t tscBuildCreateDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) { ...@@ -1258,10 +1258,10 @@ int32_t tscBuildCreateDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
} }
static bool tscIsAlterCommand(char* sqlstr) { static bool tscIsAlterCommand(char* sqlstr) {
int32_t index = 0; int32_t idx = 0;
do { do {
SStrToken t0 = tStrGetToken(sqlstr, &index, false); SStrToken t0 = tStrGetToken(sqlstr, &idx, false);
if (t0.type != TK_LP) { if (t0.type != TK_LP) {
return t0.type == TK_ALTER; return t0.type == TK_ALTER;
} }
...@@ -2643,18 +2643,18 @@ int tscProcessShowRsp(SSqlObj *pSql) { ...@@ -2643,18 +2643,18 @@ int tscProcessShowRsp(SSqlObj *pSql) {
SFieldInfo* pFieldInfo = &pQueryInfo->fieldsInfo; SFieldInfo* pFieldInfo = &pQueryInfo->fieldsInfo;
SColumnIndex index = {0}; SColumnIndex idx = {0};
pSchema = pMetaMsg->schema; pSchema = pMetaMsg->schema;
uint64_t uid = pTableMetaInfo->pTableMeta->id.uid; uint64_t uid = pTableMetaInfo->pTableMeta->id.uid;
for (int16_t i = 0; i < pMetaMsg->numOfColumns; ++i, ++pSchema) { for (int16_t i = 0; i < pMetaMsg->numOfColumns; ++i, ++pSchema) {
index.columnIndex = i; idx.columnIndex = i;
tscColumnListInsert(pQueryInfo->colList, i, uid, pSchema); tscColumnListInsert(pQueryInfo->colList, i, uid, pSchema);
TAOS_FIELD f = tscCreateField(pSchema->type, pSchema->name, pSchema->bytes); TAOS_FIELD f = tscCreateField(pSchema->type, pSchema->name, pSchema->bytes);
SInternalField* pInfo = tscFieldInfoAppend(pFieldInfo, &f); SInternalField* pInfo = tscFieldInfoAppend(pFieldInfo, &f);
pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &idx,
pTableSchema[i].type, pTableSchema[i].bytes, getNewResColId(pCmd), pTableSchema[i].bytes, false); pTableSchema[i].type, pTableSchema[i].bytes, getNewResColId(pCmd), pTableSchema[i].bytes, false);
} }
...@@ -3481,4 +3481,4 @@ void tscInitMsgsFp() { ...@@ -3481,4 +3481,4 @@ void tscInitMsgsFp() {
tscKeepConn[TSDB_SQL_SELECT] = 1; tscKeepConn[TSDB_SQL_SELECT] = 1;
tscKeepConn[TSDB_SQL_FETCH] = 1; tscKeepConn[TSDB_SQL_FETCH] = 1;
tscKeepConn[TSDB_SQL_HB] = 1; tscKeepConn[TSDB_SQL_HB] = 1;
} }
\ No newline at end of file
...@@ -30,7 +30,7 @@ ...@@ -30,7 +30,7 @@
typedef struct SInsertSupporter { typedef struct SInsertSupporter {
SSqlObj* pSql; SSqlObj* pSql;
int32_t index; int32_t idx;
} SInsertSupporter; } SInsertSupporter;
static void freeJoinSubqueryObj(SSqlObj* pSql); static void freeJoinSubqueryObj(SSqlObj* pSql);
...@@ -84,14 +84,14 @@ static bool allSubqueryDone(SSqlObj *pParentSql) { ...@@ -84,14 +84,14 @@ static bool allSubqueryDone(SSqlObj *pParentSql) {
for (int i = 0; i < subState->numOfSub; i++) { for (int i = 0; i < subState->numOfSub; i++) {
SSqlObj* pSub = pParentSql->pSubs[i]; SSqlObj* pSub = pParentSql->pSubs[i];
if (0 == subState->states[i]) { if (0 == subState->states[i]) {
tscDebug("0x%"PRIx64" subquery:0x%"PRIx64", index: %d NOT finished yet", pParentSql->self, pSub->self, i); tscDebug("0x%"PRIx64" subquery:0x%"PRIx64", idx: %d NOT finished yet", pParentSql->self, pSub->self, i);
done = false; done = false;
break; break;
} else { } else {
if (pSub != NULL) { if (pSub != NULL) {
tscDebug("0x%"PRIx64" subquery:0x%"PRIx64", index: %d finished", pParentSql->self, pSub->self, i); tscDebug("0x%"PRIx64" subquery:0x%"PRIx64", idx: %d finished", pParentSql->self, pSub->self, i);
} else { } else {
tscDebug("0x%"PRIx64" subquery:%p, index: %d finished", pParentSql->self, pSub, i); tscDebug("0x%"PRIx64" subquery:%p, idx: %d finished", pParentSql->self, pSub, i);
} }
} }
} }
...@@ -105,7 +105,7 @@ bool subAndCheckDone(SSqlObj *pSql, SSqlObj *pParentSql, int idx) { ...@@ -105,7 +105,7 @@ bool subAndCheckDone(SSqlObj *pSql, SSqlObj *pParentSql, int idx) {
pthread_mutex_lock(&subState->mutex); pthread_mutex_lock(&subState->mutex);
tscDebug("0x%"PRIx64" subquery:0x%"PRIx64", index:%d state set to 1", pParentSql->self, pSql->self, idx); tscDebug("0x%"PRIx64" subquery:0x%"PRIx64", idx:%d state set to 1", pParentSql->self, pSql->self, idx);
subState->states[idx] = 1; subState->states[idx] = 1;
bool done = allSubqueryDone(pParentSql); bool done = allSubqueryDone(pParentSql);
...@@ -383,7 +383,7 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) { ...@@ -383,7 +383,7 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) {
// todo handle failed to create sub query // todo handle failed to create sub query
SJoinSupporter* tscCreateJoinSupporter(SSqlObj* pSql, int32_t index) { SJoinSupporter* tscCreateJoinSupporter(SSqlObj* pSql, int32_t idx) {
SJoinSupporter* pSupporter = calloc(1, sizeof(SJoinSupporter)); SJoinSupporter* pSupporter = calloc(1, sizeof(SJoinSupporter));
if (pSupporter == NULL) { if (pSupporter == NULL) {
return NULL; return NULL;
...@@ -391,7 +391,7 @@ SJoinSupporter* tscCreateJoinSupporter(SSqlObj* pSql, int32_t index) { ...@@ -391,7 +391,7 @@ SJoinSupporter* tscCreateJoinSupporter(SSqlObj* pSql, int32_t index) {
pSupporter->pObj = pSql->self; pSupporter->pObj = pSql->self;
pSupporter->subqueryIndex = index; pSupporter->subqueryIndex = idx;
SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd); SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
memcpy(&pSupporter->interval, &pQueryInfo->interval, sizeof(pSupporter->interval)); memcpy(&pSupporter->interval, &pQueryInfo->interval, sizeof(pSupporter->interval));
...@@ -403,7 +403,7 @@ SJoinSupporter* tscCreateJoinSupporter(SSqlObj* pSql, int32_t index) { ...@@ -403,7 +403,7 @@ SJoinSupporter* tscCreateJoinSupporter(SSqlObj* pSql, int32_t index) {
pSupporter->numOfFillVal = pQueryInfo->numOfFillVal; pSupporter->numOfFillVal = pQueryInfo->numOfFillVal;
} }
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, index); STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, idx);
pSupporter->uid = pTableMetaInfo->pTableMeta->id.uid; pSupporter->uid = pTableMetaInfo->pTableMeta->id.uid;
assert (pSupporter->uid != 0); assert (pSupporter->uid != 0);
...@@ -614,7 +614,7 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) { ...@@ -614,7 +614,7 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
* during the timestamp intersection. * during the timestamp intersection.
*/ */
pSupporter->limit = pQueryInfo->limit; pSupporter->limit = pQueryInfo->limit;
SColumnIndex index = {.tableIndex = 0, .columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX}; SColumnIndex idx = {.tableIndex = 0, .columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX};
SSchema* s = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, 0); SSchema* s = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, 0);
SExprInfo* pExpr = tscExprGet(pQueryInfo, 0); SExprInfo* pExpr = tscExprGet(pQueryInfo, 0);
...@@ -626,7 +626,7 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) { ...@@ -626,7 +626,7 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
int16_t functionId = tscIsProjectionQuery(pQueryInfo)? TSDB_FUNC_PRJ : TSDB_FUNC_TS; int16_t functionId = tscIsProjectionQuery(pQueryInfo)? TSDB_FUNC_PRJ : TSDB_FUNC_TS;
tscAddFuncInSelectClause(pQueryInfo, 0, functionId, &index, s, TSDB_COL_NORMAL, getNewResColId(&pNew->cmd)); tscAddFuncInSelectClause(pQueryInfo, 0, functionId, &idx, s, TSDB_COL_NORMAL, getNewResColId(&pNew->cmd));
tscPrintSelNodeList(pNew, 0); tscPrintSelNodeList(pNew, 0);
tscFieldInfoUpdateOffset(pQueryInfo); tscFieldInfoUpdateOffset(pQueryInfo);
...@@ -836,8 +836,8 @@ static void issueTsCompQuery(SSqlObj* pSql, SJoinSupporter* pSupporter, SSqlObj* ...@@ -836,8 +836,8 @@ static void issueTsCompQuery(SSqlObj* pSql, SJoinSupporter* pSupporter, SSqlObj*
SSchema colSchema = {.type = TSDB_DATA_TYPE_BINARY, .bytes = 1}; SSchema colSchema = {.type = TSDB_DATA_TYPE_BINARY, .bytes = 1};
SColumnIndex index = {0, PRIMARYKEY_TIMESTAMP_COL_INDEX}; SColumnIndex idx = {0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
SExprInfo *pExpr = tscAddFuncInSelectClause(pQueryInfo, 0, TSDB_FUNC_TS_COMP, &index, &colSchema, TSDB_COL_NORMAL, getNewResColId(pCmd)); SExprInfo *pExpr = tscAddFuncInSelectClause(pQueryInfo, 0, TSDB_FUNC_TS_COMP, &idx, &colSchema, TSDB_COL_NORMAL, getNewResColId(pCmd));
// set the tags value for ts_comp function // set the tags value for ts_comp function
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
...@@ -1280,7 +1280,7 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow ...@@ -1280,7 +1280,7 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
// todo retry if other subqueries are not failed // todo retry if other subqueries are not failed
assert(numOfRows < 0 && numOfRows == taos_errno(pSql)); assert(numOfRows < 0 && numOfRows == taos_errno(pSql));
tscError("0x%"PRIx64" sub query failed, code:%s, index:%d", pSql->self, tstrerror(numOfRows), pSupporter->subqueryIndex); tscError("0x%"PRIx64" sub query failed, code:%s, idx:%d", pSql->self, tstrerror(numOfRows), pSupporter->subqueryIndex);
pParentSql->res.code = numOfRows; pParentSql->res.code = numOfRows;
if (quitAllSubquery(pSql, pParentSql, pSupporter)) { if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
...@@ -1336,7 +1336,7 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow ...@@ -1336,7 +1336,7 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
pTableMetaInfo->vgroupIndex += 1; pTableMetaInfo->vgroupIndex += 1;
assert(pTableMetaInfo->vgroupIndex < totalVgroups); assert(pTableMetaInfo->vgroupIndex < totalVgroups);
tscDebug("0x%"PRIx64" tid_tag from vgroup index:%d completed, try next vgroup:%d. total vgroups:%d. current numOfRes:%d", tscDebug("0x%"PRIx64" tid_tag from vgroup idx:%d completed, try next vgroup:%d. total vgroups:%d. current numOfRes:%d",
pSql->self, pTableMetaInfo->vgroupIndex - 1, pTableMetaInfo->vgroupIndex, totalVgroups, pSupporter->num); pSql->self, pTableMetaInfo->vgroupIndex - 1, pTableMetaInfo->vgroupIndex, totalVgroups, pSupporter->num);
pCmd->command = TSDB_SQL_SELECT; pCmd->command = TSDB_SQL_SELECT;
...@@ -1447,7 +1447,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow ...@@ -1447,7 +1447,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
if (taos_errno(pSql) != TSDB_CODE_SUCCESS) { if (taos_errno(pSql) != TSDB_CODE_SUCCESS) {
// todo retry if other subqueries are not failed yet // todo retry if other subqueries are not failed yet
assert(numOfRows < 0 && numOfRows == taos_errno(pSql)); assert(numOfRows < 0 && numOfRows == taos_errno(pSql));
tscError("0x%"PRIx64" sub query failed, code:%s, index:%d", pSql->self, tstrerror(numOfRows), pSupporter->subqueryIndex); tscError("0x%"PRIx64" sub query failed, code:%s, idx:%d", pSql->self, tstrerror(numOfRows), pSupporter->subqueryIndex);
pParentSql->res.code = numOfRows; pParentSql->res.code = numOfRows;
if (quitAllSubquery(pSql, pParentSql, pSupporter)){ if (quitAllSubquery(pSql, pParentSql, pSupporter)){
...@@ -1525,7 +1525,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow ...@@ -1525,7 +1525,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
pTableMetaInfo->vgroupIndex += 1; pTableMetaInfo->vgroupIndex += 1;
assert(pTableMetaInfo->vgroupIndex < totalVgroups); assert(pTableMetaInfo->vgroupIndex < totalVgroups);
tscDebug("0x%"PRIx64" results from vgroup index:%d completed, try next vgroup:%d. total vgroups:%d. current numOfRes:%" PRId64, tscDebug("0x%"PRIx64" results from vgroup idx:%d completed, try next vgroup:%d. total vgroups:%d. current numOfRes:%" PRId64,
pSql->self, pTableMetaInfo->vgroupIndex - 1, pTableMetaInfo->vgroupIndex, totalVgroups, pSql->self, pTableMetaInfo->vgroupIndex - 1, pTableMetaInfo->vgroupIndex, totalVgroups,
pRes->numOfClauseTotal); pRes->numOfClauseTotal);
...@@ -1610,7 +1610,7 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR ...@@ -1610,7 +1610,7 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR
assert(numOfRows == taos_errno(pSql)); assert(numOfRows == taos_errno(pSql));
pParentSql->res.code = numOfRows; pParentSql->res.code = numOfRows;
tscError("0x%"PRIx64" retrieve failed, index:%d, code:%s", pSql->self, pSupporter->subqueryIndex, tstrerror(numOfRows)); tscError("0x%"PRIx64" retrieve failed, idx:%d, code:%s", pSql->self, pSupporter->subqueryIndex, tstrerror(numOfRows));
tscAsyncResultOnError(pParentSql); tscAsyncResultOnError(pParentSql);
goto _return; goto _return;
...@@ -1670,7 +1670,7 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR ...@@ -1670,7 +1670,7 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR
pParentSql->res.precision = pRes1->precision; pParentSql->res.precision = pRes1->precision;
if (pRes1->row > 0 && pRes1->numOfRows > 0) { if (pRes1->row > 0 && pRes1->numOfRows > 0) {
tscDebug("0x%"PRIx64" sub:0x%"PRIx64" index:%d numOfRows:%d total:%"PRId64 " (not retrieve)", pParentSql->self, tscDebug("0x%"PRIx64" sub:0x%"PRIx64" idx:%d numOfRows:%d total:%"PRId64 " (not retrieve)", pParentSql->self,
pParentSql->pSubs[i]->self, i, pRes1->numOfRows, pRes1->numOfTotal); pParentSql->pSubs[i]->self, i, pRes1->numOfRows, pRes1->numOfTotal);
assert(pRes1->row < pRes1->numOfRows || (pRes1->row == pRes1->numOfRows && pRes1->completed)); assert(pRes1->row < pRes1->numOfRows || (pRes1->row == pRes1->numOfRows && pRes1->completed));
} else { } else {
...@@ -1678,7 +1678,7 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR ...@@ -1678,7 +1678,7 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR
pRes1->numOfClauseTotal += pRes1->numOfRows; pRes1->numOfClauseTotal += pRes1->numOfRows;
} }
tscDebug("0x%"PRIx64" sub:0x%"PRIx64" index:%d numOfRows:%d total:%"PRId64, pParentSql->self, tscDebug("0x%"PRIx64" sub:0x%"PRIx64" idx:%d numOfRows:%d total:%"PRId64, pParentSql->self,
pParentSql->pSubs[i]->self, i, pRes1->numOfRows, pRes1->numOfTotal); pParentSql->pSubs[i]->self, i, pRes1->numOfRows, pRes1->numOfTotal);
} }
} }
...@@ -1879,7 +1879,7 @@ void tscFetchDatablockForSubquery(SSqlObj* pSql) { ...@@ -1879,7 +1879,7 @@ void tscFetchDatablockForSubquery(SSqlObj* pSql) {
} }
} }
// all subqueries return, set the result output index // all subqueries return, set the result output idx
void tscSetupOutputColumnIndex(SSqlObj* pSql) { void tscSetupOutputColumnIndex(SSqlObj* pSql) {
SSqlCmd* pCmd = &pSql->cmd; SSqlCmd* pCmd = &pSql->cmd;
SSqlRes* pRes = &pSql->res; SSqlRes* pRes = &pSql->res;
...@@ -2567,7 +2567,7 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) { ...@@ -2567,7 +2567,7 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) {
int32_t numOfExprs = (int32_t) tscNumOfExprs(pQueryInfo); int32_t numOfExprs = (int32_t) tscNumOfExprs(pQueryInfo);
int32_t index = 0; int32_t idx = 0;
for(int32_t i = 0; i < numOfExprs; ++i) { for(int32_t i = 0; i < numOfExprs; ++i) {
SExprInfo* pExpr = tscExprGet(pQueryInfo, i); SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
if (pExpr->base.functionId == TSDB_FUNC_TS && pQueryInfo->interval.interval > 0) { if (pExpr->base.functionId == TSDB_FUNC_TS && pQueryInfo->interval.interval > 0) {
...@@ -2576,7 +2576,7 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) { ...@@ -2576,7 +2576,7 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) {
SColumnIndex colIndex = {.tableIndex = 0, .columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX}; SColumnIndex colIndex = {.tableIndex = 0, .columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX};
SSchema* schema = tscGetColumnSchemaById(pTableMetaInfo1->pTableMeta, pExpr->base.colInfo.colId); SSchema* schema = tscGetColumnSchemaById(pTableMetaInfo1->pTableMeta, pExpr->base.colInfo.colId);
SExprInfo* p = tscAddFuncInSelectClause(pNewQueryInfo, index++, TSDB_FUNC_TS, &colIndex, schema, TSDB_COL_NORMAL, getNewResColId(pCmd)); SExprInfo* p = tscAddFuncInSelectClause(pNewQueryInfo, idx++, TSDB_FUNC_TS, &colIndex, schema, TSDB_COL_NORMAL, getNewResColId(pCmd));
p->base.resColId = pExpr->base.resColId; // update the result column id p->base.resColId = pExpr->base.resColId; // update the result column id
} else if (pExpr->base.functionId == TSDB_FUNC_STDDEV_DST) { } else if (pExpr->base.functionId == TSDB_FUNC_STDDEV_DST) {
taosArrayPush(pSup->pColsInfo, &pExpr->base.resColId); taosArrayPush(pSup->pColsInfo, &pExpr->base.resColId);
...@@ -2585,7 +2585,7 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) { ...@@ -2585,7 +2585,7 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) {
SSchema schema = {.type = TSDB_DATA_TYPE_DOUBLE, .bytes = sizeof(double)}; SSchema schema = {.type = TSDB_DATA_TYPE_DOUBLE, .bytes = sizeof(double)};
tstrncpy(schema.name, pExpr->base.aliasName, tListLen(schema.name)); tstrncpy(schema.name, pExpr->base.aliasName, tListLen(schema.name));
SExprInfo* p = tscAddFuncInSelectClause(pNewQueryInfo, index++, TSDB_FUNC_AVG, &colIndex, &schema, TSDB_COL_NORMAL, getNewResColId(pCmd)); SExprInfo* p = tscAddFuncInSelectClause(pNewQueryInfo, idx++, TSDB_FUNC_AVG, &colIndex, &schema, TSDB_COL_NORMAL, getNewResColId(pCmd));
p->base.resColId = pExpr->base.resColId; // update the result column id p->base.resColId = pExpr->base.resColId; // update the result column id
} else if (pExpr->base.functionId == TSDB_FUNC_TAG) { } else if (pExpr->base.functionId == TSDB_FUNC_TAG) {
pSup->tagLen += pExpr->base.resBytes; pSup->tagLen += pExpr->base.resBytes;
...@@ -2598,7 +2598,7 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) { ...@@ -2598,7 +2598,7 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) {
schema = tGetTbnameColumnSchema(); schema = tGetTbnameColumnSchema();
} }
SExprInfo* p = tscAddFuncInSelectClause(pNewQueryInfo, index++, TSDB_FUNC_TAG, &colIndex, schema, TSDB_COL_TAG, getNewResColId(pCmd)); SExprInfo* p = tscAddFuncInSelectClause(pNewQueryInfo, idx++, TSDB_FUNC_TAG, &colIndex, schema, TSDB_COL_TAG, getNewResColId(pCmd));
if (schema->type == TSDB_DATA_TYPE_JSON){ if (schema->type == TSDB_DATA_TYPE_JSON){
p->base.numOfParams = pExpr->base.numOfParams; p->base.numOfParams = pExpr->base.numOfParams;
tVariantAssign(&p->base.param[0], &pExpr->base.param[0]); tVariantAssign(&p->base.param[0], &pExpr->base.param[0]);
...@@ -2616,7 +2616,7 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) { ...@@ -2616,7 +2616,7 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) {
SSchema* schema = tscGetColumnSchemaById(pTableMetaInfo1->pTableMeta, pExpr->base.colInfo.colId); SSchema* schema = tscGetColumnSchemaById(pTableMetaInfo1->pTableMeta, pExpr->base.colInfo.colId);
//doLimitOutputNormalColOfGroupby //doLimitOutputNormalColOfGroupby
SExprInfo* p = tscAddFuncInSelectClause(pNewQueryInfo, index++, TSDB_FUNC_PRJ, &colIndex, schema, TSDB_COL_NORMAL, getNewResColId(pCmd)); SExprInfo* p = tscAddFuncInSelectClause(pNewQueryInfo, idx++, TSDB_FUNC_PRJ, &colIndex, schema, TSDB_COL_NORMAL, getNewResColId(pCmd));
p->base.numOfParams = 1; p->base.numOfParams = 1;
p->base.param[0].i64 = 1; p->base.param[0].i64 = 1;
p->base.param[0].nType = TSDB_DATA_TYPE_INT; p->base.param[0].nType = TSDB_DATA_TYPE_INT;
...@@ -2658,7 +2658,7 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) { ...@@ -2658,7 +2658,7 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) {
"0x%"PRIx64" first round subquery:0x%"PRIx64" tableIndex:%d, vgroupIndex:%d, numOfVgroups:%d, type:%d, query to retrieve timestamps, " "0x%"PRIx64" first round subquery:0x%"PRIx64" tableIndex:%d, vgroupIndex:%d, numOfVgroups:%d, type:%d, query to retrieve timestamps, "
"numOfExpr:%" PRIzu ", colList:%d, numOfOutputFields:%d, name:%s", "numOfExpr:%" PRIzu ", colList:%d, numOfOutputFields:%d, name:%s",
pSql->self, pNew->self, 0, pTableMetaInfo->vgroupIndex, pTableMetaInfo->vgroupList->numOfVgroups, pNewQueryInfo->type, pSql->self, pNew->self, 0, pTableMetaInfo->vgroupIndex, pTableMetaInfo->vgroupList->numOfVgroups, pNewQueryInfo->type,
tscNumOfExprs(pNewQueryInfo), index+1, pNewQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pTableMetaInfo->name)); tscNumOfExprs(pNewQueryInfo), idx+1, pNewQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pTableMetaInfo->name));
pSql->pSubs = calloc(1, POINTER_BYTES); pSql->pSubs = calloc(1, POINTER_BYTES);
if (pSql->pSubs == NULL) { if (pSql->pSubs == NULL) {
...@@ -3281,7 +3281,7 @@ SSqlObj *tscCreateSTableSubquery(SSqlObj *pSql, SRetrieveSupport *trsupport, SSq ...@@ -3281,7 +3281,7 @@ SSqlObj *tscCreateSTableSubquery(SSqlObj *pSql, SRetrieveSupport *trsupport, SSq
assert(trsupport->subqueryIndex < pSql->subState.numOfSub); assert(trsupport->subqueryIndex < pSql->subState.numOfSub);
// launch subquery for each vnode, so the subquery index equals to the vgroupIndex. // launch subquery for each vnode, so the subquery idx equals to the vgroupIndex.
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, table_index); STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, table_index);
pTableMetaInfo->vgroupIndex = trsupport->subqueryIndex; pTableMetaInfo->vgroupIndex = trsupport->subqueryIndex;
...@@ -3411,7 +3411,7 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows) ...@@ -3411,7 +3411,7 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
} }
} }
if (!subAndCheckDone(tres, pParentObj, pSupporter->index)) { if (!subAndCheckDone(tres, pParentObj, pSupporter->idx)) {
// concurrency problem, other thread already release pParentObj // concurrency problem, other thread already release pParentObj
//tscDebug("0x%"PRIx64" insert:%p,%d completed, total:%d", pParentObj->self, tres, suppIdx, pParentObj->subState.numOfSub); //tscDebug("0x%"PRIx64" insert:%p,%d completed, total:%d", pParentObj->self, tres, suppIdx, pParentObj->subState.numOfSub);
return; return;
...@@ -3495,9 +3495,9 @@ int32_t tscHandleInsertRetry(SSqlObj* pParent, SSqlObj* pSql) { ...@@ -3495,9 +3495,9 @@ int32_t tscHandleInsertRetry(SSqlObj* pParent, SSqlObj* pSql) {
SSqlRes* pRes = &pSql->res; SSqlRes* pRes = &pSql->res;
SInsertSupporter* pSupporter = (SInsertSupporter*) pSql->param; SInsertSupporter* pSupporter = (SInsertSupporter*) pSql->param;
assert(pSupporter->index < pSupporter->pSql->subState.numOfSub); assert(pSupporter->idx < pSupporter->pSql->subState.numOfSub);
STableDataBlocks* pTableDataBlock = taosArrayGetP(pParent->cmd.insertParam.pDataBlocks, pSupporter->index); STableDataBlocks* pTableDataBlock = taosArrayGetP(pParent->cmd.insertParam.pDataBlocks, pSupporter->idx);
int32_t code = tscCopyDataBlockToPayload(pSql, pTableDataBlock); int32_t code = tscCopyDataBlockToPayload(pSql, pTableDataBlock);
if ((pRes->code = code)!= TSDB_CODE_SUCCESS) { if ((pRes->code = code)!= TSDB_CODE_SUCCESS) {
...@@ -3524,7 +3524,7 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) { ...@@ -3524,7 +3524,7 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) { for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
SSqlObj* pSub = pSql->pSubs[i]; SSqlObj* pSub = pSql->pSubs[i];
SInsertSupporter* pSup = calloc(1, sizeof(SInsertSupporter)); SInsertSupporter* pSup = calloc(1, sizeof(SInsertSupporter));
pSup->index = i; pSup->idx = i;
pSup->pSql = pSql; pSup->pSql = pSql;
pSub->param = pSup; pSub->param = pSup;
...@@ -3572,7 +3572,7 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) { ...@@ -3572,7 +3572,7 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
} }
pSupporter->pSql = pSql; pSupporter->pSql = pSql;
pSupporter->index = numOfSub; pSupporter->idx = numOfSub;
SSqlObj *pNew = createSimpleSubObj(pSql, multiVnodeInsertFinalize, pSupporter, TSDB_SQL_INSERT); SSqlObj *pNew = createSimpleSubObj(pSql, multiVnodeInsertFinalize, pSupporter, TSDB_SQL_INSERT);
if (pNew == NULL) { if (pNew == NULL) {
...@@ -3763,19 +3763,19 @@ void tscBuildResFromSubqueries(SSqlObj *pSql) { ...@@ -3763,19 +3763,19 @@ void tscBuildResFromSubqueries(SSqlObj *pSql) {
char * getScalarExprInputSrc(void *param, const char *name, int32_t colId) { char * getScalarExprInputSrc(void *param, const char *name, int32_t colId) {
SScalarExprSupport*pSupport = (SScalarExprSupport*) param; SScalarExprSupport*pSupport = (SScalarExprSupport*) param;
int32_t index = -1; int32_t idx = -1;
SExprInfo* pExpr = NULL; SExprInfo* pExpr = NULL;
for (int32_t i = 0; i < pSupport->numOfCols; ++i) { for (int32_t i = 0; i < pSupport->numOfCols; ++i) {
pExpr = taosArrayGetP(pSupport->exprList, i); pExpr = taosArrayGetP(pSupport->exprList, i);
if (strncmp(name, pExpr->base.aliasName, sizeof(pExpr->base.aliasName) - 1) == 0) { if (strncmp(name, pExpr->base.aliasName, sizeof(pExpr->base.aliasName) - 1) == 0) {
index = i; idx = i;
break; break;
} }
} }
assert(index >= 0 && index < pSupport->numOfCols); assert(idx >= 0 && idx < pSupport->numOfCols);
return pSupport->data[index] + pSupport->offset * pExpr->base.resBytes; return pSupport->data[idx] + pSupport->offset * pExpr->base.resBytes;
} }
TAOS_ROW doSetResultRowData(SSqlObj *pSql) { TAOS_ROW doSetResultRowData(SSqlObj *pSql) {
...@@ -3815,7 +3815,7 @@ TAOS_ROW doSetResultRowData(SSqlObj *pSql) { ...@@ -3815,7 +3815,7 @@ TAOS_ROW doSetResultRowData(SSqlObj *pSql) {
j += 1; j += 1;
} }
pRes->row++; // index increase one-step pRes->row++; // idx increase one-step
return pRes->tsrow; return pRes->tsrow;
} }
...@@ -3959,7 +3959,7 @@ void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGr ...@@ -3959,7 +3959,7 @@ void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGr
pthread_mutex_init(&pQInfo->lock, NULL); pthread_mutex_init(&pQInfo->lock, NULL);
tsem_init(&pQInfo->ready, 0, 0); tsem_init(&pQInfo->ready, 0, 0);
int32_t index = 0; int32_t idx = 0;
for(int32_t i = 0; i < numOfGroups; ++i) { for(int32_t i = 0; i < numOfGroups; ++i) {
SArray* pa = taosArrayGetP(pQueryAttr->tableGroupInfo.pGroupList, i); SArray* pa = taosArrayGetP(pQueryAttr->tableGroupInfo.pGroupList, i);
...@@ -3976,7 +3976,7 @@ void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGr ...@@ -3976,7 +3976,7 @@ void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGr
STableKeyInfo* info = taosArrayGet(pa, j); STableKeyInfo* info = taosArrayGet(pa, j);
window.skey = info->lastKey; window.skey = info->lastKey;
void* buf = (char*) pQInfo->pBuf + index * sizeof(STableQueryInfo); void* buf = (char*) pQInfo->pBuf + idx * sizeof(STableQueryInfo);
STableQueryInfo* item = createTableQueryInfo(pQueryAttr, info->pTable, pQueryAttr->groupbyColumn, window, buf); STableQueryInfo* item = createTableQueryInfo(pQueryAttr, info->pTable, pQueryAttr->groupbyColumn, window, buf);
if (item == NULL) { if (item == NULL) {
goto _cleanup; goto _cleanup;
...@@ -3987,7 +3987,7 @@ void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGr ...@@ -3987,7 +3987,7 @@ void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGr
STableId id = {.tid = 0, .uid = 0}; STableId id = {.tid = 0, .uid = 0};
taosHashPut(pRuntimeEnv->tableqinfoGroupInfo.map, &id.tid, sizeof(id.tid), &item, POINTER_BYTES); taosHashPut(pRuntimeEnv->tableqinfoGroupInfo.map, &id.tid, sizeof(id.tid), &item, POINTER_BYTES);
index += 1; idx += 1;
} }
} }
......
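The idx in these log lines is each subquery's slot in the parent's completion bookkeeping; subAndCheckDone()/allSubqueryDone() above implement a plain fan-in under a mutex. A stripped-down sketch of that pattern (struct and function names here are illustrative, not the actual tsclient definitions):

#include <pthread.h>
#include <stdbool.h>

typedef struct {
  pthread_mutex_t mutex;
  int             numOfSub;
  char           *states;   /* states[i] == 1 once subquery i has finished */
} SubFanIn;

/* Returns true exactly once, for the subquery that completes last;
 * that caller is the one allowed to resume the parent query. */
static bool sub_and_check_done(SubFanIn *st, int idx) {
  bool done = true;
  pthread_mutex_lock(&st->mutex);
  st->states[idx] = 1;
  for (int i = 0; i < st->numOfSub; ++i) {
    if (st->states[i] == 0) { done = false; break; }
  }
  pthread_mutex_unlock(&st->mutex);
  return done;
}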
...@@ -87,24 +87,24 @@ int32_t tscAcquireRpc(const char *key, const char *user, const char *secretEncry ...@@ -87,24 +87,24 @@ int32_t tscAcquireRpc(const char *key, const char *user, const char *secretEncry
return 0; return 0;
} }
SRpcInit rpcInit; SRpcInit rpcInitial;
memset(&rpcInit, 0, sizeof(rpcInit)); memset(&rpcInitial, 0, sizeof(rpcInitial));
rpcInit.localPort = 0; rpcInitial.localPort = 0;
rpcInit.label = "TSC"; rpcInitial.label = "TSC";
rpcInit.numOfThreads = tscNumOfThreads; rpcInitial.numOfThreads = tscNumOfThreads;
rpcInit.cfp = tscProcessMsgFromServer; rpcInitial.cfp = tscProcessMsgFromServer;
rpcInit.sessions = tsMaxConnections; rpcInitial.sessions = tsMaxConnections;
rpcInit.connType = TAOS_CONN_CLIENT; rpcInitial.connType = TAOS_CONN_CLIENT;
rpcInit.user = (char *)user; rpcInitial.user = (char *)user;
rpcInit.idleTime = tsShellActivityTimer * 1000; rpcInitial.idleTime = tsShellActivityTimer * 1000;
rpcInit.ckey = "key"; rpcInitial.ckey = "key";
rpcInit.spi = 1; rpcInitial.spi = 1;
rpcInit.secret = (char *)secretEncrypt; rpcInitial.secret = (char *)secretEncrypt;
SRpcObj rpcObj; SRpcObj rpcObj;
memset(&rpcObj, 0, sizeof(rpcObj)); memset(&rpcObj, 0, sizeof(rpcObj));
tstrncpy(rpcObj.key, key, sizeof(rpcObj.key)); tstrncpy(rpcObj.key, key, sizeof(rpcObj.key));
rpcObj.pDnodeConn = rpcOpen(&rpcInit); rpcObj.pDnodeConn = rpcOpen(&rpcInitial);
if (rpcObj.pDnodeConn == NULL) { if (rpcObj.pDnodeConn == NULL) {
pthread_mutex_unlock(&rpcObjMutex); pthread_mutex_unlock(&rpcObjMutex);
tscError("failed to init connection to server"); tscError("failed to init connection to server");
......
...@@ -2305,10 +2305,10 @@ void tscCloseTscObj(void *param) { ...@@ -2305,10 +2305,10 @@ void tscCloseTscObj(void *param) {
} }
bool tscIsInsertData(char* sqlstr) { bool tscIsInsertData(char* sqlstr) {
int32_t index = 0; int32_t idx = 0;
do { do {
SStrToken t0 = tStrGetToken(sqlstr, &index, false); SStrToken t0 = tStrGetToken(sqlstr, &idx, false);
if (t0.type != TK_LP) { if (t0.type != TK_LP) {
return t0.type == TK_INSERT || t0.type == TK_IMPORT; return t0.type == TK_INSERT || t0.type == TK_IMPORT;
} }
...@@ -2378,12 +2378,12 @@ SInternalField* tscFieldInfoAppend(SFieldInfo* pFieldInfo, TAOS_FIELD* pField) { ...@@ -2378,12 +2378,12 @@ SInternalField* tscFieldInfoAppend(SFieldInfo* pFieldInfo, TAOS_FIELD* pField) {
return taosArrayPush(pFieldInfo->internalField, &info); return taosArrayPush(pFieldInfo->internalField, &info);
} }
SInternalField* tscFieldInfoInsert(SFieldInfo* pFieldInfo, int32_t index, TAOS_FIELD* field) { SInternalField* tscFieldInfoInsert(SFieldInfo* pFieldInfo, int32_t idx, TAOS_FIELD* field) {
pFieldInfo->numOfOutput++; pFieldInfo->numOfOutput++;
struct SInternalField info = { .pExpr = NULL, .visible = true }; struct SInternalField info = { .pExpr = NULL, .visible = true };
info.field = *field; info.field = *field;
return taosArrayInsert(pFieldInfo->internalField, index, &info); return taosArrayInsert(pFieldInfo->internalField, idx, &info);
} }
void tscFieldInfoUpdateOffset(SQueryInfo* pQueryInfo) { void tscFieldInfoUpdateOffset(SQueryInfo* pQueryInfo) {
...@@ -2398,18 +2398,18 @@ void tscFieldInfoUpdateOffset(SQueryInfo* pQueryInfo) { ...@@ -2398,18 +2398,18 @@ void tscFieldInfoUpdateOffset(SQueryInfo* pQueryInfo) {
} }
} }
SInternalField* tscFieldInfoGetInternalField(SFieldInfo* pFieldInfo, int32_t index) { SInternalField* tscFieldInfoGetInternalField(SFieldInfo* pFieldInfo, int32_t idx) {
assert(index < pFieldInfo->numOfOutput); assert(idx < pFieldInfo->numOfOutput);
return TARRAY_GET_ELEM(pFieldInfo->internalField, index); return TARRAY_GET_ELEM(pFieldInfo->internalField, idx);
} }
TAOS_FIELD* tscFieldInfoGetField(SFieldInfo* pFieldInfo, int32_t index) { TAOS_FIELD* tscFieldInfoGetField(SFieldInfo* pFieldInfo, int32_t idx) {
assert(index < pFieldInfo->numOfOutput); assert(idx < pFieldInfo->numOfOutput);
return &((SInternalField*)TARRAY_GET_ELEM(pFieldInfo->internalField, index))->field; return &((SInternalField*)TARRAY_GET_ELEM(pFieldInfo->internalField, idx))->field;
} }
int32_t tscFieldInfoGetOffset(SQueryInfo* pQueryInfo, int32_t index) { int32_t tscFieldInfoGetOffset(SQueryInfo* pQueryInfo, int32_t idx) {
SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, index); SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, idx);
assert(pInfo != NULL && pInfo->pExpr->pExpr == NULL); assert(pInfo != NULL && pInfo->pExpr->pExpr == NULL);
return pInfo->pExpr->base.offset; return pInfo->pExpr->base.offset;
...@@ -2635,16 +2635,16 @@ SExprInfo* tscExprCreate(STableMetaInfo* pTableMetaInfo, int16_t functionId, SCo ...@@ -2635,16 +2635,16 @@ SExprInfo* tscExprCreate(STableMetaInfo* pTableMetaInfo, int16_t functionId, SCo
return pExpr; return pExpr;
} }
SExprInfo* tscExprInsert(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, SColumnIndex* pColIndex, int16_t type, SExprInfo* tscExprInsert(SQueryInfo* pQueryInfo, int32_t idx, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
int16_t size, int16_t resColId, int32_t interSize, bool isTagCol) { int16_t size, int16_t resColId, int32_t interSize, bool isTagCol) {
int32_t num = (int32_t)taosArrayGetSize(pQueryInfo->exprList); int32_t num = (int32_t)taosArrayGetSize(pQueryInfo->exprList);
if (index == num) { if (idx == num) {
return tscExprAppend(pQueryInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol); return tscExprAppend(pQueryInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol);
} }
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, pColIndex->tableIndex); STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, pColIndex->tableIndex);
SExprInfo* pExpr = tscExprCreate(pTableMetaInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol); SExprInfo* pExpr = tscExprCreate(pTableMetaInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol);
taosArrayInsert(pQueryInfo->exprList, index, &pExpr); taosArrayInsert(pQueryInfo->exprList, idx, &pExpr);
return pExpr; return pExpr;
} }
...@@ -2656,10 +2656,10 @@ SExprInfo* tscExprAppend(SQueryInfo* pQueryInfo, int16_t functionId, SColumnInde ...@@ -2656,10 +2656,10 @@ SExprInfo* tscExprAppend(SQueryInfo* pQueryInfo, int16_t functionId, SColumnInde
return pExpr; return pExpr;
} }
SExprInfo* tscExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, int16_t srcColumnIndex, SExprInfo* tscExprUpdate(SQueryInfo* pQueryInfo, int32_t idx, int16_t functionId, int16_t srcColumnIndex,
int16_t type, int32_t size) { int16_t type, int32_t size) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
SExprInfo* pExpr = tscExprGet(pQueryInfo, index); SExprInfo* pExpr = tscExprGet(pQueryInfo, idx);
if (pExpr == NULL) { if (pExpr == NULL) {
return NULL; return NULL;
} }
...@@ -2676,8 +2676,8 @@ SExprInfo* tscExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t function ...@@ -2676,8 +2676,8 @@ SExprInfo* tscExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t function
return pExpr; return pExpr;
} }
bool tscMultiRoundQuery(SQueryInfo* pQueryInfo, int32_t index) { bool tscMultiRoundQuery(SQueryInfo* pQueryInfo, int32_t idx) {
if (!UTIL_TABLE_IS_SUPER_TABLE(pQueryInfo->pTableMetaInfo[index])) { if (!UTIL_TABLE_IS_SUPER_TABLE(pQueryInfo->pTableMetaInfo[idx])) {
return false; return false;
} }
...@@ -2725,8 +2725,8 @@ void tscExprAddParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t byt ...@@ -2725,8 +2725,8 @@ void tscExprAddParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t byt
assert(pExpr->numOfParams <= 3); assert(pExpr->numOfParams <= 3);
} }
SExprInfo* tscExprGet(SQueryInfo* pQueryInfo, int32_t index) { SExprInfo* tscExprGet(SQueryInfo* pQueryInfo, int32_t idx) {
return taosArrayGetP(pQueryInfo->exprList, index); return taosArrayGetP(pQueryInfo->exprList, idx);
} }
/* /*
...@@ -3014,6 +3014,10 @@ int32_t tscValidateName(SStrToken* pToken, bool escapeEnabled, bool *dbIncluded) ...@@ -3014,6 +3014,10 @@ int32_t tscValidateName(SStrToken* pToken, bool escapeEnabled, bool *dbIncluded)
// single token, validate it // single token, validate it
if (len == pToken->n) { if (len == pToken->n) {
if (taosIsKeyWordToken(pToken->z, (int32_t) pToken->n)) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
return validateQuoteToken(pToken, escapeEnabled, NULL); return validateQuoteToken(pToken, escapeEnabled, NULL);
} else { } else {
sep = strnchr(pToken->z, TS_PATH_DELIMITER[0], pToken->n, true); sep = strnchr(pToken->z, TS_PATH_DELIMITER[0], pToken->n, true);
...@@ -3297,8 +3301,8 @@ void tscGetSrcColumnInfo(SSrcColumnInfo* pColInfo, SQueryInfo* pQueryInfo) { ...@@ -3297,8 +3301,8 @@ void tscGetSrcColumnInfo(SSrcColumnInfo* pColInfo, SQueryInfo* pQueryInfo) {
if (TSDB_COL_IS_TAG(pExpr->base.colInfo.flag)) { if (TSDB_COL_IS_TAG(pExpr->base.colInfo.flag)) {
SSchema* pTagSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta); SSchema* pTagSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta);
int16_t index = pExpr->base.colInfo.colIndex; int16_t idx = pExpr->base.colInfo.colIndex;
pColInfo[i].type = (index != -1) ? pTagSchema[index].type : TSDB_DATA_TYPE_BINARY; pColInfo[i].type = (idx != -1) ? pTagSchema[idx].type : TSDB_DATA_TYPE_BINARY;
} else { } else {
pColInfo[i].type = pSchema[pExpr->base.colInfo.colIndex].type; pColInfo[i].type = pSchema[pExpr->base.colInfo.colIndex].type;
} }
...@@ -3381,7 +3385,7 @@ SQueryInfo* tscGetQueryInfoS(SSqlCmd* pCmd) { ...@@ -3381,7 +3385,7 @@ SQueryInfo* tscGetQueryInfoS(SSqlCmd* pCmd) {
return pQueryInfo; return pQueryInfo;
} }
STableMetaInfo* tscGetTableMetaInfoByUid(SQueryInfo* pQueryInfo, uint64_t uid, int32_t* index) { STableMetaInfo* tscGetTableMetaInfoByUid(SQueryInfo* pQueryInfo, uint64_t uid, int32_t* idx) {
int32_t k = -1; int32_t k = -1;
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) { for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
...@@ -3391,8 +3395,8 @@ STableMetaInfo* tscGetTableMetaInfoByUid(SQueryInfo* pQueryInfo, uint64_t uid, i ...@@ -3391,8 +3395,8 @@ STableMetaInfo* tscGetTableMetaInfoByUid(SQueryInfo* pQueryInfo, uint64_t uid, i
} }
} }
if (index != NULL) { if (idx != NULL) {
*index = k; *idx = k;
} }
assert(k != -1); assert(k != -1);
...@@ -3615,19 +3619,19 @@ void tscFreeVgroupTableInfo(SArray* pVgroupTables) { ...@@ -3615,19 +3619,19 @@ void tscFreeVgroupTableInfo(SArray* pVgroupTables) {
taosArrayDestroy(&pVgroupTables); taosArrayDestroy(&pVgroupTables);
} }
void tscRemoveVgroupTableGroup(SArray* pVgroupTable, int32_t index) { void tscRemoveVgroupTableGroup(SArray* pVgroupTable, int32_t idx) {
assert(pVgroupTable != NULL && index >= 0); assert(pVgroupTable != NULL && idx >= 0);
size_t size = taosArrayGetSize(pVgroupTable); size_t size = taosArrayGetSize(pVgroupTable);
assert(size > index); assert(size > idx);
SVgroupTableInfo* pInfo = taosArrayGet(pVgroupTable, index); SVgroupTableInfo* pInfo = taosArrayGet(pVgroupTable, idx);
// for(int32_t j = 0; j < pInfo->vgInfo.numOfEps; ++j) { // for(int32_t j = 0; j < pInfo->vgInfo.numOfEps; ++j) {
// tfree(pInfo->vgInfo.epAddr[j].fqdn); // tfree(pInfo->vgInfo.epAddr[j].fqdn);
// } // }
taosArrayDestroy(&pInfo->itemList); taosArrayDestroy(&pInfo->itemList);
taosArrayRemove(pVgroupTable, index); taosArrayRemove(pVgroupTable, idx);
} }
void tscVgroupTableCopy(SVgroupTableInfo* info, SVgroupTableInfo* pInfo) { void tscVgroupTableCopy(SVgroupTableInfo* info, SVgroupTableInfo* pInfo) {
...@@ -4102,15 +4106,15 @@ static void tscSubqueryRetrieveCallback(void* param, TAOS_RES* tres, int code) { ...@@ -4102,15 +4106,15 @@ static void tscSubqueryRetrieveCallback(void* param, TAOS_RES* tres, int code) {
SSqlObj* pParentSql = ps->pParentSql; SSqlObj* pParentSql = ps->pParentSql;
SSqlObj* pSql = tres; SSqlObj* pSql = tres;
int32_t index = ps->subqueryIndex; int32_t idx = ps->subqueryIndex;
bool ret = subAndCheckDone(pSql, pParentSql, index); bool ret = subAndCheckDone(pSql, pParentSql, idx);
// TODO refactor // TODO refactor
tfree(ps); tfree(ps);
pSql->param = NULL; pSql->param = NULL;
if (!ret) { if (!ret) {
tscDebug("0x%"PRIx64" sub:0x%"PRIx64" orderOfSub:%d completed, not all subquery finished", pParentSql->self, pSql->self, index); tscDebug("0x%"PRIx64" sub:0x%"PRIx64" orderOfSub:%d completed, not all subquery finished", pParentSql->self, pSql->self, idx);
return; return;
} }
...@@ -4131,13 +4135,13 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { ...@@ -4131,13 +4135,13 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) {
if (pSql->res.code != TSDB_CODE_SUCCESS) { if (pSql->res.code != TSDB_CODE_SUCCESS) {
SSqlObj* pParentSql = ps->pParentSql; SSqlObj* pParentSql = ps->pParentSql;
int32_t index = ps->subqueryIndex; int32_t idx = ps->subqueryIndex;
bool ret = subAndCheckDone(pSql, pParentSql, index); bool ret = subAndCheckDone(pSql, pParentSql, idx);
tscFreeRetrieveSup(&pSql->param); tscFreeRetrieveSup(&pSql->param);
if (!ret) { if (!ret) {
tscDebug("0x%"PRIx64" sub:0x%"PRIx64" orderOfSub:%d completed, not all subquery finished", pParentSql->self, pSql->self, index); tscDebug("0x%"PRIx64" sub:0x%"PRIx64" orderOfSub:%d completed, not all subquery finished", pParentSql->self, pSql->self, idx);
return; return;
} }
......
...@@ -60,40 +60,40 @@ void calc_i32_i32_add(void *left, void *right, int32_t numLeft, int32_t numRight ...@@ -60,40 +60,40 @@ void calc_i32_i32_add(void *left, void *right, int32_t numLeft, int32_t numRight
} }
} }
typedef double (*_arithmetic_getVectorDoubleValue_fn_t)(void *src, int32_t index); typedef double (*_arithmetic_getVectorDoubleValue_fn_t)(void *src, int32_t idx);
double getVectorDoubleValue_TINYINT(void *src, int32_t index) { double getVectorDoubleValue_TINYINT(void *src, int32_t idx) {
return (double)*((int8_t *)src + index); return (double)*((int8_t *)src + idx);
} }
double getVectorDoubleValue_UTINYINT(void *src, int32_t index) { double getVectorDoubleValue_UTINYINT(void *src, int32_t idx) {
return (double)*((uint8_t *)src + index); return (double)*((uint8_t *)src + idx);
} }
double getVectorDoubleValue_SMALLINT(void *src, int32_t index) { double getVectorDoubleValue_SMALLINT(void *src, int32_t idx) {
return (double)*((int16_t *)src + index); return (double)*((int16_t *)src + idx);
} }
double getVectorDoubleValue_USMALLINT(void *src, int32_t index) { double getVectorDoubleValue_USMALLINT(void *src, int32_t idx) {
return (double)*((uint16_t *)src + index); return (double)*((uint16_t *)src + idx);
} }
double getVectorDoubleValue_INT(void *src, int32_t index) { double getVectorDoubleValue_INT(void *src, int32_t idx) {
return (double)*((int32_t *)src + index); return (double)*((int32_t *)src + idx);
} }
double getVectorDoubleValue_UINT(void *src, int32_t index) { double getVectorDoubleValue_UINT(void *src, int32_t idx) {
return (double)*((uint32_t *)src + index); return (double)*((uint32_t *)src + idx);
} }
double getVectorDoubleValue_BIGINT(void *src, int32_t index) { double getVectorDoubleValue_BIGINT(void *src, int32_t idx) {
return (double)*((int64_t *)src + index); return (double)*((int64_t *)src + idx);
} }
double getVectorDoubleValue_UBIGINT(void *src, int32_t index) { double getVectorDoubleValue_UBIGINT(void *src, int32_t idx) {
return (double)*((uint64_t *)src + index); return (double)*((uint64_t *)src + idx);
} }
double getVectorDoubleValue_FLOAT(void *src, int32_t index) { double getVectorDoubleValue_FLOAT(void *src, int32_t idx) {
return (double)*((float *)src + index); return (double)*((float *)src + idx);
} }
double getVectorDoubleValue_DOUBLE(void *src, int32_t index) { double getVectorDoubleValue_DOUBLE(void *src, int32_t idx) {
return (double)*((double *)src + index); return (double)*((double *)src + idx);
} }
int64_t getVectorTimestampValue(void *src, int32_t index) { int64_t getVectorTimestampValue(void *src, int32_t idx) {
return (int64_t)*((int64_t *)src + index); return (int64_t)*((int64_t *)src + idx);
} }
_arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFn(int32_t srcType) { _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFn(int32_t srcType) {
_arithmetic_getVectorDoubleValue_fn_t p = NULL; _arithmetic_getVectorDoubleValue_fn_t p = NULL;
...@@ -124,40 +124,40 @@ _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFn(int32_t srcType) { ...@@ -124,40 +124,40 @@ _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFn(int32_t srcType) {
} }
typedef void* (*_arithmetic_getVectorValueAddr_fn_t)(void *src, int32_t index); typedef void* (*_arithmetic_getVectorValueAddr_fn_t)(void *src, int32_t idx);
void* getVectorValueAddr_BOOL(void *src, int32_t index) { void* getVectorValueAddr_BOOL(void *src, int32_t idx) {
return (void*)((bool *)src + index); return (void*)((bool *)src + idx);
} }
void* getVectorValueAddr_TINYINT(void *src, int32_t index) { void* getVectorValueAddr_TINYINT(void *src, int32_t idx) {
return (void*)((int8_t *)src + index); return (void*)((int8_t *)src + idx);
} }
void* getVectorValueAddr_UTINYINT(void *src, int32_t index) { void* getVectorValueAddr_UTINYINT(void *src, int32_t idx) {
return (void*)((uint8_t *)src + index); return (void*)((uint8_t *)src + idx);
} }
void* getVectorValueAddr_SMALLINT(void *src, int32_t index) { void* getVectorValueAddr_SMALLINT(void *src, int32_t idx) {
return (void*)((int16_t *)src + index); return (void*)((int16_t *)src + idx);
} }
void* getVectorValueAddr_USMALLINT(void *src, int32_t index) { void* getVectorValueAddr_USMALLINT(void *src, int32_t idx) {
return (void*)((uint16_t *)src + index); return (void*)((uint16_t *)src + idx);
} }
void* getVectorValueAddr_INT(void *src, int32_t index) { void* getVectorValueAddr_INT(void *src, int32_t idx) {
return (void*)((int32_t *)src + index); return (void*)((int32_t *)src + idx);
} }
void* getVectorValueAddr_UINT(void *src, int32_t index) { void* getVectorValueAddr_UINT(void *src, int32_t idx) {
return (void*)((uint32_t *)src + index); return (void*)((uint32_t *)src + idx);
} }
void* getVectorValueAddr_BIGINT(void *src, int32_t index) { void* getVectorValueAddr_BIGINT(void *src, int32_t idx) {
return (void*)((int64_t *)src + index); return (void*)((int64_t *)src + idx);
} }
void* getVectorValueAddr_UBIGINT(void *src, int32_t index) { void* getVectorValueAddr_UBIGINT(void *src, int32_t idx) {
return (void*)((uint64_t *)src + index); return (void*)((uint64_t *)src + idx);
} }
void* getVectorValueAddr_FLOAT(void *src, int32_t index) { void* getVectorValueAddr_FLOAT(void *src, int32_t idx) {
return (void*)((float *)src + index); return (void*)((float *)src + idx);
} }
void* getVectorValueAddr_DOUBLE(void *src, int32_t index) { void* getVectorValueAddr_DOUBLE(void *src, int32_t idx) {
return (void*)((double *)src + index); return (void*)((double *)src + idx);
} }
_arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFn(int32_t srcType) { _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFn(int32_t srcType) {
...@@ -474,34 +474,34 @@ void vectorRemainder(void *left, int32_t len1, int32_t _left_type, void *right, ...@@ -474,34 +474,34 @@ void vectorRemainder(void *left, int32_t len1, int32_t _left_type, void *right,
} }
} }
typedef int64_t (*_arithmetic_getVectorBigintValue_fn_t)(void *src, int32_t index); typedef int64_t (*_arithmetic_getVectorBigintValue_fn_t)(void *src, int32_t idx);
int64_t getVectorBigintValue_BOOL(void *src, int32_t index) { int64_t getVectorBigintValue_BOOL(void *src, int32_t idx) {
return (int64_t)*((bool *)src + index); return (int64_t)*((bool *)src + idx);
} }
int64_t getVectorBigintValue_TINYINT(void *src, int32_t index) { int64_t getVectorBigintValue_TINYINT(void *src, int32_t idx) {
return (int64_t)*((int8_t *)src + index); return (int64_t)*((int8_t *)src + idx);
} }
int64_t getVectorBigintValue_UTINYINT(void *src, int32_t index) { int64_t getVectorBigintValue_UTINYINT(void *src, int32_t idx) {
return (int64_t)*((uint8_t *)src + index); return (int64_t)*((uint8_t *)src + idx);
} }
int64_t getVectorBigintValue_SMALLINT(void *src, int32_t index) { int64_t getVectorBigintValue_SMALLINT(void *src, int32_t idx) {
return (int64_t)*((int16_t *)src + index); return (int64_t)*((int16_t *)src + idx);
} }
int64_t getVectorBigintValue_USMALLINT(void *src, int32_t index) { int64_t getVectorBigintValue_USMALLINT(void *src, int32_t idx) {
return (int64_t)*((uint16_t *)src + index); return (int64_t)*((uint16_t *)src + idx);
} }
int64_t getVectorBigintValue_INT(void *src, int32_t index) { int64_t getVectorBigintValue_INT(void *src, int32_t idx) {
return (int64_t)*((int32_t *)src + index); return (int64_t)*((int32_t *)src + idx);
} }
int64_t getVectorBigintValue_UINT(void *src, int32_t index) { int64_t getVectorBigintValue_UINT(void *src, int32_t idx) {
return (int64_t)*((uint32_t *)src + index); return (int64_t)*((uint32_t *)src + idx);
} }
int64_t getVectorBigintValue_BIGINT(void *src, int32_t index) { int64_t getVectorBigintValue_BIGINT(void *src, int32_t idx) {
return (int64_t)*((int64_t *)src + index); return (int64_t)*((int64_t *)src + idx);
} }
int64_t getVectorBigintValue_UBIGINT(void *src, int32_t index) { int64_t getVectorBigintValue_UBIGINT(void *src, int32_t idx) {
return (int64_t)*((uint64_t *)src + index); return (int64_t)*((uint64_t *)src + idx);
} }
_arithmetic_getVectorBigintValue_fn_t getVectorBigintValueFn(int32_t srcType) { _arithmetic_getVectorBigintValue_fn_t getVectorBigintValueFn(int32_t srcType) {
......
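The hunk above only renames `index` to `idx` inside the per-type accessor functions and the function-pointer typedef that dispatches on the column type. For readers unfamiliar with the pattern, here is a minimal, self-contained sketch of the same idea in plain C; the type codes, function names, and the dispatcher are illustrative only and are not TDengine's real TSDB_DATA_TYPE_* constants or getVectorBigintValueFn API.

#include <stdint.h>
#include <stdio.h>

/* One accessor per storage type, all sharing one signature. */
typedef int64_t (*get_bigint_fn_t)(void *src, int32_t idx);

static int64_t get_bigint_i32(void *src, int32_t idx) { return (int64_t)*((int32_t *)src + idx); }
static int64_t get_bigint_i64(void *src, int32_t idx) { return *((int64_t *)src + idx); }

/* Hypothetical type codes, used only in this sketch. */
enum { DEMO_TYPE_INT = 1, DEMO_TYPE_BIGINT = 2 };

/* Dispatcher: pick the accessor once per column, then reuse it per row. */
static get_bigint_fn_t get_bigint_fn(int32_t srcType) {
    switch (srcType) {
        case DEMO_TYPE_INT:    return get_bigint_i32;
        case DEMO_TYPE_BIGINT: return get_bigint_i64;
        default:               return NULL;
    }
}

int main(void) {
    int32_t col[] = {7, 8, 9};
    get_bigint_fn_t fn = get_bigint_fn(DEMO_TYPE_INT);
    printf("%lld\n", (long long)fn(col, 2)); /* prints 9 */
    return 0;
}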
...@@ -304,14 +304,14 @@ bool isNEleNull(SDataCol *pCol, int nEle) { ...@@ -304,14 +304,14 @@ bool isNEleNull(SDataCol *pCol, int nEle) {
return true; return true;
} }
static FORCE_INLINE void dataColSetNullAt(SDataCol *pCol, int index) { static FORCE_INLINE void dataColSetNullAt(SDataCol *pCol, int idx) {
if (IS_VAR_DATA_TYPE(pCol->type)) { if (IS_VAR_DATA_TYPE(pCol->type)) {
pCol->dataOff[index] = pCol->len; pCol->dataOff[idx] = pCol->len;
char *ptr = POINTER_SHIFT(pCol->pData, pCol->len); char *ptr = POINTER_SHIFT(pCol->pData, pCol->len);
setVardataNull(ptr, pCol->type); setVardataNull(ptr, pCol->type);
pCol->len += varDataTLen(ptr); pCol->len += varDataTLen(ptr);
} else { } else {
setNull(POINTER_SHIFT(pCol->pData, TYPE_BYTES[pCol->type] * index), pCol->type, pCol->bytes); setNull(POINTER_SHIFT(pCol->pData, TYPE_BYTES[pCol->type] * idx), pCol->type, pCol->bytes);
pCol->len += TYPE_BYTES[pCol->type]; pCol->len += TYPE_BYTES[pCol->type];
} }
} }
......
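dataColSetNullAt(), shown above, distinguishes two column layouts: variable-width columns append the null value at the current write offset and record that offset in dataOff[idx], while fixed-width columns address cell idx at bytes-per-cell times idx. The following standalone sketch mirrors just that arithmetic; POINTER_SHIFT, setNull, setVardataNull and the real null encodings are TDengine internals and are not reproduced here.

#include <stdint.h>
#include <string.h>

/* Fixed-width column: cell idx lives at a computed offset. */
void demo_set_fixed_null(char *pData, int32_t cellBytes, int32_t idx) {
    /* 0xFF stands in for the real per-type null pattern */
    memset(pData + (size_t)cellBytes * idx, 0xFF, (size_t)cellBytes);
}

/* Variable-width column: values are appended; dataOff[idx] records where
 * row idx starts, and *len advances by the appended size. */
void demo_append_var_null(char *pData, int32_t *dataOff, int32_t *len,
                          int32_t idx, const char *nullRepr, int32_t nullBytes) {
    dataOff[idx] = *len;
    memcpy(pData + *len, nullRepr, (size_t)nullBytes);
    *len += nullBytes;
}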
...@@ -397,10 +397,10 @@ bool taosCfgDynamicOptions(char *msg) { ...@@ -397,10 +397,10 @@ bool taosCfgDynamicOptions(char *msg) {
return false; return false;
} }
void taosAddDataDir(int index, char *v1, int level, int primary) { void taosAddDataDir(int idx, char *v1, int level, int primary) {
tstrncpy(tsDiskCfg[index].dir, v1, TSDB_FILENAME_LEN); tstrncpy(tsDiskCfg[idx].dir, v1, TSDB_FILENAME_LEN);
tsDiskCfg[index].level = level; tsDiskCfg[idx].level = level;
tsDiskCfg[index].primary = primary; tsDiskCfg[idx].primary = primary;
uTrace("dataDir:%s, level:%d primary:%d is configured", v1, level, primary); uTrace("dataDir:%s, level:%d primary:%d is configured", v1, level, primary);
} }
......
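taosAddDataDir() above copies a directory path into a fixed-size slot with tstrncpy and stores the level/primary flags. Assuming tstrncpy is a bounded copy that always NUL-terminates (the usual convention for such helpers; not verified here), an equivalent standalone version looks like this, with a hypothetical config struct in place of tsDiskCfg:

#include <string.h>

/* Bounded copy that always NUL-terminates, in the spirit of tstrncpy(). */
void demo_strlcpy(char *dst, const char *src, size_t size) {
    if (size == 0) return;
    strncpy(dst, src, size - 1);
    dst[size - 1] = '\0';
}

/* Hypothetical stand-in for a tsDiskCfg[] slot. */
struct demo_disk_cfg { char dir[128]; int level; int primary; };

void demo_add_data_dir(struct demo_disk_cfg *cfg, int idx,
                       const char *dir, int level, int primary) {
    demo_strlcpy(cfg[idx].dir, dir, sizeof(cfg[idx].dir));
    cfg[idx].level   = level;
    cfg[idx].primary = primary;
}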
...@@ -442,29 +442,29 @@ void tNameAssign(SName* dst, const SName* src) { ...@@ -442,29 +442,29 @@ void tNameAssign(SName* dst, const SName* src) {
memcpy(dst, src, sizeof(SName)); memcpy(dst, src, sizeof(SName));
} }
int32_t tNameSetDbName(SName* dst, const char* acct, SStrToken* dbToken) { int32_t tNameSetDbName(SName* dst, const char* accnt, SStrToken* dbToken) {
assert(dst != NULL && dbToken != NULL && acct != NULL); assert(dst != NULL && dbToken != NULL && accnt != NULL);
// too long account id or too long db name // too long account id or too long db name
if (strlen(acct) >= tListLen(dst->acctId) || dbToken->n >= tListLen(dst->dbname)) { if (strlen(accnt) >= tListLen(dst->acctId) || dbToken->n >= tListLen(dst->dbname)) {
return -1; return -1;
} }
dst->type = TSDB_DB_NAME_T; dst->type = TSDB_DB_NAME_T;
tstrncpy(dst->acctId, acct, tListLen(dst->acctId)); tstrncpy(dst->acctId, accnt, tListLen(dst->acctId));
tstrncpy(dst->dbname, dbToken->z, dbToken->n + 1); tstrncpy(dst->dbname, dbToken->z, dbToken->n + 1);
return 0; return 0;
} }
int32_t tNameSetAcctId(SName* dst, const char* acct) { int32_t tNameSetAcctId(SName* dst, const char* accnt) {
assert(dst != NULL && acct != NULL); assert(dst != NULL && accnt != NULL);
// too long account id or too long db name // too long account id or too long db name
if (strlen(acct) >= tListLen(dst->acctId)) { if (strlen(accnt) >= tListLen(dst->acctId)) {
return -1; return -1;
} }
tstrncpy(dst->acctId, acct, tListLen(dst->acctId)); tstrncpy(dst->acctId, accnt, tListLen(dst->acctId));
assert(strlen(dst->acctId) > 0); assert(strlen(dst->acctId) > 0);
......
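tNameSetDbName() above validates a counted token (the z pointer and n length suggested by dbToken->z / dbToken->n) against the destination buffer before copying. A self-contained sketch of that check-then-copy step, with a hypothetical token struct standing in for SStrToken:

#include <string.h>

/* A counted (not NUL-terminated) token, shaped after dbToken->z / dbToken->n. */
struct demo_token { const char *z; size_t n; };

/* Reject tokens that would not fit, then copy and terminate explicitly;
 * the -1 return mirrors the error convention in the hunk above. */
int demo_set_name(char *dst, size_t dstSize, const struct demo_token *tok) {
    if (tok->n >= dstSize) return -1;
    memcpy(dst, tok->z, tok->n);
    dst[tok->n] = '\0';
    return 0;
}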
...@@ -259,8 +259,8 @@ static void getStatics_u64(const void *pData, int32_t numOfRow, int64_t *min, in ...@@ -259,8 +259,8 @@ static void getStatics_u64(const void *pData, int32_t numOfRow, int64_t *min, in
static void getStatics_f(const void *pData, int32_t numOfRow, int64_t *min, int64_t *max, static void getStatics_f(const void *pData, int32_t numOfRow, int64_t *min, int64_t *max,
int64_t *sum, int16_t *minIndex, int16_t *maxIndex, int16_t *numOfNull) { int64_t *sum, int16_t *minIndex, int16_t *maxIndex, int16_t *numOfNull) {
float *data = (float *)pData; float *data = (float *)pData;
float fmin = FLT_MAX; float flmin = FLT_MAX;
float fmax = -FLT_MAX; float flmax = -FLT_MAX;
double dsum = 0; double dsum = 0;
*minIndex = 0; *minIndex = 0;
*maxIndex = 0; *maxIndex = 0;
...@@ -276,20 +276,20 @@ static void getStatics_f(const void *pData, int32_t numOfRow, int64_t *min, int6 ...@@ -276,20 +276,20 @@ static void getStatics_f(const void *pData, int32_t numOfRow, int64_t *min, int6
float fv = GET_FLOAT_VAL((const char*)&(data[i])); float fv = GET_FLOAT_VAL((const char*)&(data[i]));
dsum += fv; dsum += fv;
if (fmin > fv) { if (flmin > fv) {
fmin = fv; flmin = fv;
*minIndex = i; *minIndex = i;
} }
if (fmax < fv) { if (flmax < fv) {
fmax = fv; flmax = fv;
*maxIndex = i; *maxIndex = i;
} }
} }
SET_DOUBLE_VAL(sum, dsum); SET_DOUBLE_VAL(sum, dsum);
SET_DOUBLE_VAL(max, fmax); SET_DOUBLE_VAL(max, flmax);
SET_DOUBLE_VAL(min, fmin); SET_DOUBLE_VAL(min, flmin);
} }
static void getStatics_d(const void *pData, int32_t numOfRow, int64_t *min, int64_t *max, static void getStatics_d(const void *pData, int32_t numOfRow, int64_t *min, int64_t *max,
......
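getStatics_f() above is a single pass over a float column that tracks the running minimum, maximum, their row indices, and a double-precision sum. The sketch below keeps only that scan; the original's GET_FLOAT_VAL / SET_DOUBLE_VAL macros, null handling, and the int64_t*-reinterpreted output pointers are simplified to plain doubles.

#include <float.h>
#include <stdint.h>

/* One pass: running min/max with their row indices, plus a double sum. */
void demo_float_statics(const float *data, int32_t n,
                        double *min, double *max, double *sum,
                        int16_t *minIndex, int16_t *maxIndex) {
    float  lo = FLT_MAX, hi = -FLT_MAX;
    double s  = 0;
    *minIndex = 0;
    *maxIndex = 0;
    for (int32_t i = 0; i < n; i++) {
        s += data[i];
        if (data[i] < lo) { lo = data[i]; *minIndex = (int16_t)i; }
        if (data[i] > hi) { hi = data[i]; *maxIndex = (int16_t)i; }
    }
    *sum = s;
    *min = lo;
    *max = hi;
}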
...@@ -229,12 +229,12 @@ static void dnodeAllocCheckItem() { ...@@ -229,12 +229,12 @@ static void dnodeAllocCheckItem() {
} }
void dnodeCleanupCheck() { void dnodeCleanupCheck() {
for (ECheckItemType index = 0; index < TSDB_CHECK_ITEM_MAX; ++index) { for (ECheckItemType idx = 0; idx < TSDB_CHECK_ITEM_MAX; ++idx) {
if (tsCheckItem[index].enable && tsCheckItem[index].stopFp) { if (tsCheckItem[idx].enable && tsCheckItem[idx].stopFp) {
(*tsCheckItem[index].stopFp)(); (*tsCheckItem[idx].stopFp)();
} }
if (tsCheckItem[index].cleanUpFp) { if (tsCheckItem[idx].cleanUpFp) {
(*tsCheckItem[index].cleanUpFp)(); (*tsCheckItem[idx].cleanUpFp)();
} }
} }
} }
...@@ -242,19 +242,19 @@ void dnodeCleanupCheck() { ...@@ -242,19 +242,19 @@ void dnodeCleanupCheck() {
int32_t dnodeInitCheck() { int32_t dnodeInitCheck() {
dnodeAllocCheckItem(); dnodeAllocCheckItem();
for (ECheckItemType index = 0; index < TSDB_CHECK_ITEM_MAX; ++index) { for (ECheckItemType idx = 0; idx < TSDB_CHECK_ITEM_MAX; ++idx) {
if (tsCheckItem[index].initFp) { if (tsCheckItem[idx].initFp) {
if ((*tsCheckItem[index].initFp)() != 0) { if ((*tsCheckItem[idx].initFp)() != 0) {
dError("failed to init check item:%s", tsCheckItem[index].name); dError("failed to init check item:%s", tsCheckItem[idx].name);
return -1; return -1;
} }
} }
} }
for (ECheckItemType index = 0; index < TSDB_CHECK_ITEM_MAX; ++index) { for (ECheckItemType idx = 0; idx < TSDB_CHECK_ITEM_MAX; ++idx) {
if (tsCheckItem[index].enable && tsCheckItem[index].startFp) { if (tsCheckItem[idx].enable && tsCheckItem[idx].startFp) {
if ((*tsCheckItem[index].startFp)() != 0) { if ((*tsCheckItem[idx].startFp)() != 0) {
dError("failed to check item:%s", tsCheckItem[index].name); dError("failed to check item:%s", tsCheckItem[idx].name);
exit(-1); exit(-1);
} }
} }
......
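dnodeInitCheck() and dnodeCleanupCheck() above walk a table of check items whose optional init/start/stop/cleanup callbacks drive startup and shutdown. The struct fields (name, enable, initFp, startFp, stopFp, cleanUpFp) are inferred from the hunk; everything else in this standalone sketch is hypothetical.

#include <stdio.h>

typedef struct {
    const char *name;
    int         enable;
    int  (*initFp)(void);
    int  (*startFp)(void);
    void (*stopFp)(void);
    void (*cleanUpFp)(void);
} demo_check_item_t;

#define DEMO_ITEM_MAX 2
static demo_check_item_t demoItems[DEMO_ITEM_MAX];

/* Init every item first, then start the enabled ones; any failure aborts. */
int demo_init_checks(void) {
    for (int i = 0; i < DEMO_ITEM_MAX; i++) {
        if (demoItems[i].initFp && demoItems[i].initFp() != 0) {
            fprintf(stderr, "failed to init check item:%s\n", demoItems[i].name);
            return -1;
        }
    }
    for (int i = 0; i < DEMO_ITEM_MAX; i++) {
        if (demoItems[i].enable && demoItems[i].startFp && demoItems[i].startFp() != 0) {
            fprintf(stderr, "failed to check item:%s\n", demoItems[i].name);
            return -1;
        }
    }
    return 0;
}

/* Shutdown: stop the enabled items, then run every cleanup hook. */
void demo_cleanup_checks(void) {
    for (int i = 0; i < DEMO_ITEM_MAX; i++) {
        if (demoItems[i].enable && demoItems[i].stopFp) demoItems[i].stopFp();
        if (demoItems[i].cleanUpFp) demoItems[i].cleanUpFp();
    }
}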
...@@ -56,17 +56,17 @@ int32_t dnodeInitServer() { ...@@ -56,17 +56,17 @@ int32_t dnodeInitServer() {
dnodeProcessReqMsgFp[TSDB_MSG_TYPE_DM_GRANT] = dnodeDispatchToMPeerQueue; dnodeProcessReqMsgFp[TSDB_MSG_TYPE_DM_GRANT] = dnodeDispatchToMPeerQueue;
dnodeProcessReqMsgFp[TSDB_MSG_TYPE_DM_STATUS] = dnodeDispatchToMPeerQueue; dnodeProcessReqMsgFp[TSDB_MSG_TYPE_DM_STATUS] = dnodeDispatchToMPeerQueue;
SRpcInit rpcInit; SRpcInit rpcInitial;
memset(&rpcInit, 0, sizeof(rpcInit)); memset(&rpcInitial, 0, sizeof(rpcInitial));
rpcInit.localPort = tsDnodeDnodePort; rpcInitial.localPort = tsDnodeDnodePort;
rpcInit.label = "DND-S"; rpcInitial.label = "DND-S";
rpcInit.numOfThreads = 1; rpcInitial.numOfThreads = 1;
rpcInit.cfp = dnodeProcessReqMsgFromDnode; rpcInitial.cfp = dnodeProcessReqMsgFromDnode;
rpcInit.sessions = TSDB_MAX_VNODES << 4; rpcInitial.sessions = TSDB_MAX_VNODES << 4;
rpcInit.connType = TAOS_CONN_SERVER; rpcInitial.connType = TAOS_CONN_SERVER;
rpcInit.idleTime = tsShellActivityTimer * 1000; rpcInitial.idleTime = tsShellActivityTimer * 1000;
tsServerRpc = rpcOpen(&rpcInit); tsServerRpc = rpcOpen(&rpcInitial);
if (tsServerRpc == NULL) { if (tsServerRpc == NULL) {
dError("failed to init inter-dnodes RPC server"); dError("failed to init inter-dnodes RPC server");
return -1; return -1;
...@@ -123,19 +123,19 @@ static void dnodeProcessReqMsgFromDnode(SRpcMsg *pMsg, SRpcEpSet *pEpSet) { ...@@ -123,19 +123,19 @@ static void dnodeProcessReqMsgFromDnode(SRpcMsg *pMsg, SRpcEpSet *pEpSet) {
int32_t dnodeInitClient() { int32_t dnodeInitClient() {
char secret[TSDB_KEY_LEN] = "secret"; char secret[TSDB_KEY_LEN] = "secret";
SRpcInit rpcInit; SRpcInit rpcInitial;
memset(&rpcInit, 0, sizeof(rpcInit)); memset(&rpcInitial, 0, sizeof(rpcInitial));
rpcInit.label = "DND-C"; rpcInitial.label = "DND-C";
rpcInit.numOfThreads = 1; rpcInitial.numOfThreads = 1;
rpcInit.cfp = dnodeProcessRspFromDnode; rpcInitial.cfp = dnodeProcessRspFromDnode;
rpcInit.sessions = TSDB_MAX_VNODES << 4; rpcInitial.sessions = TSDB_MAX_VNODES << 4;
rpcInit.connType = TAOS_CONN_CLIENT; rpcInitial.connType = TAOS_CONN_CLIENT;
rpcInit.idleTime = tsShellActivityTimer * 1000; rpcInitial.idleTime = tsShellActivityTimer * 1000;
rpcInit.user = "t"; rpcInitial.user = "t";
rpcInit.ckey = "key"; rpcInitial.ckey = "key";
rpcInit.secret = secret; rpcInitial.secret = secret;
tsClientRpc = rpcOpen(&rpcInit); tsClientRpc = rpcOpen(&rpcInitial);
if (tsClientRpc == NULL) { if (tsClientRpc == NULL) {
dError("failed to init mnode rpc client"); dError("failed to init mnode rpc client");
return -1; return -1;
......
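The rpcInit/rpcInitial rename in dnodeInitServer() and dnodeInitClient() above shows the usual configuration-struct pattern: zero the struct so unset fields have defined values, fill in the fields, call the open function, and fail if it returns NULL. The sketch below reproduces only that flow; the struct, the port number, and demo_rpc_open() are hypothetical stand-ins, not TDengine's SRpcInit or rpcOpen().

#include <string.h>
#include <stdio.h>

typedef struct {
    unsigned short localPort;
    const char    *label;
    int            numOfThreads;
    int            sessions;
    int            connType;
    int            idleTime;
} demo_rpc_init_t;

/* Stub that always succeeds; a real open would allocate a handle. */
static void *demo_rpc_open(const demo_rpc_init_t *cfg) { (void)cfg; return (void *)1; }

static int demo_init_server(void) {
    demo_rpc_init_t cfg;
    memset(&cfg, 0, sizeof(cfg));   /* zero first so every unset field is defined */
    cfg.localPort    = 7000;        /* hypothetical port */
    cfg.label        = "DND-S";
    cfg.numOfThreads = 1;
    void *handle = demo_rpc_open(&cfg);
    if (handle == NULL) {
        fprintf(stderr, "failed to init RPC server\n");
        return -1;
    }
    return 0;
}

int main(void) { return demo_init_server(); }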
...@@ -83,18 +83,18 @@ int32_t dnodeInitShell() { ...@@ -83,18 +83,18 @@ int32_t dnodeInitShell() {
numOfThreads = 1; numOfThreads = 1;
} }
SRpcInit rpcInit; SRpcInit rpcInitial;
memset(&rpcInit, 0, sizeof(rpcInit)); memset(&rpcInitial, 0, sizeof(rpcInitial));
rpcInit.localPort = tsDnodeShellPort; rpcInitial.localPort = tsDnodeShellPort;
rpcInit.label = "SHELL"; rpcInitial.label = "SHELL";
rpcInit.numOfThreads = numOfThreads; rpcInitial.numOfThreads = numOfThreads;
rpcInit.cfp = dnodeProcessMsgFromShell; rpcInitial.cfp = dnodeProcessMsgFromShell;
rpcInit.sessions = tsMaxShellConns; rpcInitial.sessions = tsMaxShellConns;
rpcInit.connType = TAOS_CONN_SERVER; rpcInitial.connType = TAOS_CONN_SERVER;
rpcInit.idleTime = tsShellActivityTimer * 1000; rpcInitial.idleTime = tsShellActivityTimer * 1000;
rpcInit.afp = dnodeRetrieveUserAuthInfo; rpcInitial.afp = dnodeRetrieveUserAuthInfo;
tsShellRpc = rpcOpen(&rpcInit); tsShellRpc = rpcOpen(&rpcInitial);
if (tsShellRpc == NULL) { if (tsShellRpc == NULL) {
dError("failed to init shell rpc server"); dError("failed to init shell rpc server");
return -1; return -1;
...@@ -258,10 +258,10 @@ SDnodeStatisInfo dnodeGetStatisInfo() { ...@@ -258,10 +258,10 @@ SDnodeStatisInfo dnodeGetStatisInfo() {
return info; return info;
} }
int32_t dnodeGetHttpStatusInfo(int32_t index) { int32_t dnodeGetHttpStatusInfo(int32_t idx) {
int32_t httpStatus = 0; int32_t httpStatus = 0;
#ifdef HTTP_EMBEDDED #ifdef HTTP_EMBEDDED
httpStatus = httpGetStatusCodeCount(index); httpStatus = httpGetStatusCodeCount(idx);
#endif #endif
return httpStatus; return httpStatus;
} }
......
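dnodeGetHttpStatusInfo() above is a compile-time feature gate: it returns a neutral default unless HTTP_EMBEDDED is defined, in which case it forwards to the HTTP module. A standalone sketch of that pattern, with illustrative names in place of HTTP_EMBEDDED and httpGetStatusCodeCount():

#include <stdint.h>

#ifdef DEMO_HTTP_EMBEDDED
extern int32_t demo_http_status_count(int32_t idx);  /* provided only when the feature is built */
#endif

/* Returns 0 when the HTTP module is compiled out. */
int32_t demo_get_http_status(int32_t idx) {
    int32_t status = 0;
#ifdef DEMO_HTTP_EMBEDDED
    status = demo_http_status_count(idx);
#else
    (void)idx;
#endif
    return status;
}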
Subproject commit 5fdd694621fbb7bd2d6102ff4feaec92a7001038 Subproject commit 9cb71e3c4c0474553aa961cbe19795541c29b5c7