diff --git a/deps/TSZ/sz/src/CompressElement.c b/deps/TSZ/sz/src/CompressElement.c
index b71ff9638eae603dd1ccad6e9f49a5c8add1ab0e..a215a3aebc87b83be9da867efa29f1ff4be271cd 100644
--- a/deps/TSZ/sz/src/CompressElement.c
+++ b/deps/TSZ/sz/src/CompressElement.c
@@ -7,7 +7,9 @@
* See COPYRIGHT in top-level directory.
*/
#ifndef WINDOWS
+#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
#pragma GCC diagnostic push
+#endif
#pragma GCC diagnostic ignored "-Wchar-subscripts"
#endif
@@ -233,5 +235,7 @@ INLINE void updateLossyCompElement_Float(unsigned char* diffBytes, unsigned char
}
#ifndef WINDOWS
+#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
#pragma GCC diagnostic pop
-#endif
\ No newline at end of file
+#endif
+#endif
diff --git a/deps/TSZ/sz/src/Huffman.c b/deps/TSZ/sz/src/Huffman.c
index 9868f3c0cb2b8f063226092adb13141dddc4b068..6db8b15d1f32bea41f444f68831efc064a80f111 100644
--- a/deps/TSZ/sz/src/Huffman.c
+++ b/deps/TSZ/sz/src/Huffman.c
@@ -117,7 +117,7 @@ node qremove(HuffmanTree* huffmanTree)
/**
* @out1 should be set to 0.
* @out2 should be 0 as well.
- * @index: the index of the byte
+ * @idx: the index of the byte
* */
void build_code(HuffmanTree *huffmanTree, node n, int len, unsigned long out1, unsigned long out2)
{
@@ -136,8 +136,8 @@ void build_code(HuffmanTree *huffmanTree, node n, int len, unsigned long out1, u
huffmanTree->cout[n->c] = (unsigned char)len;
return;
}
- int index = len >> 6; //=len/64
- if(index == 0)
+ int idx = len >> 6; //=len/64
+ if(idx == 0)
{
out1 = out1 << 1;
out1 = out1 | 0;
@@ -164,13 +164,13 @@ void build_code(HuffmanTree *huffmanTree, node n, int len, unsigned long out1, u
* */
void init(HuffmanTree* huffmanTree, int *s, size_t length)
{
- size_t i, index;
+ size_t i, idx;
size_t *freq = (size_t *)malloc(huffmanTree->allNodes*sizeof(size_t));
memset(freq, 0, huffmanTree->allNodes*sizeof(size_t));
for(i = 0;i < length;i++)
{
- index = s[i];
- freq[index]++;
+ idx = s[i];
+ freq[idx]++;
}
for (i = 0; i < huffmanTree->allNodes; i++)
diff --git a/deps/TSZ/sz/src/TightDataPointStorageD.c b/deps/TSZ/sz/src/TightDataPointStorageD.c
index 469a1bdce96aba395f92bc09ef345d72c3934064..e1a2af9c04283b3bcb65212f6896c2dd99f2d17a 100644
--- a/deps/TSZ/sz/src/TightDataPointStorageD.c
+++ b/deps/TSZ/sz/src/TightDataPointStorageD.c
@@ -25,9 +25,9 @@ void new_TightDataPointStorageD_Empty(TightDataPointStorageD **this)
int new_TightDataPointStorageD_fromFlatBytes(TightDataPointStorageD **this, unsigned char* flatBytes, size_t flatBytesLength, sz_exedata* pde_exe, sz_params* pde_params)
{
new_TightDataPointStorageD_Empty(this);
- size_t i, index = 0;
- unsigned char version = flatBytes[index++]; //3
- unsigned char sameRByte = flatBytes[index++]; //1
+ size_t i, idx = 0;
+ unsigned char version = flatBytes[idx++]; //3
+ unsigned char sameRByte = flatBytes[idx++]; //1
// parse data format
switch (version)
@@ -46,15 +46,15 @@ int new_TightDataPointStorageD_fromFlatBytes(TightDataPointStorageD **this, unsi
pde_params->accelerate_pw_rel_compression = (sameRByte & 0x08) >> 3;
int errorBoundMode = SZ_ABS;
- convertBytesToSZParams(&(flatBytes[index]), pde_params, pde_exe);
+ convertBytesToSZParams(&(flatBytes[idx]), pde_params, pde_exe);
- index += MetaDataByteLength_double;
+ idx += MetaDataByteLength_double;
int isRegression = (sameRByte >> 7) & 0x01;
unsigned char dsLengthBytes[8];
for (i = 0; i < pde_exe->SZ_SIZE_TYPE; i++)
- dsLengthBytes[i] = flatBytes[index++];
+ dsLengthBytes[i] = flatBytes[idx++];
(*this)->dataSeriesLength = bytesToSize(dsLengthBytes, pde_exe->SZ_SIZE_TYPE);
if((*this)->isLossless==1)
@@ -65,7 +65,7 @@ int new_TightDataPointStorageD_fromFlatBytes(TightDataPointStorageD **this, unsi
else if(same==1)
{
(*this)->allSameData = 1;
- (*this)->exactMidBytes = &(flatBytes[index]);
+ (*this)->exactMidBytes = &(flatBytes[idx]);
return errorBoundMode;
}
else
@@ -74,42 +74,42 @@ int new_TightDataPointStorageD_fromFlatBytes(TightDataPointStorageD **this, unsi
if(isRegression == 1)
{
(*this)->raBytes_size = flatBytesLength - 3 - 1 - MetaDataByteLength_double - pde_exe->SZ_SIZE_TYPE;
- (*this)->raBytes = &(flatBytes[index]);
+ (*this)->raBytes = &(flatBytes[idx]);
return errorBoundMode;
}
unsigned char byteBuf[8];
for (i = 0; i < 4; i++)
- byteBuf[i] = flatBytes[index++];
+ byteBuf[i] = flatBytes[idx++];
int max_quant_intervals = bytesToInt_bigEndian(byteBuf);// 4
pde_params->maxRangeRadius = max_quant_intervals/2;
for (i = 0; i < 4; i++)
- byteBuf[i] = flatBytes[index++];
+ byteBuf[i] = flatBytes[idx++];
(*this)->intervals = bytesToInt_bigEndian(byteBuf);// 4
for (i = 0; i < 8; i++)
- byteBuf[i] = flatBytes[index++];
+ byteBuf[i] = flatBytes[idx++];
(*this)->medianValue = bytesToDouble(byteBuf);//8
- (*this)->reqLength = flatBytes[index++]; //1
+ (*this)->reqLength = flatBytes[idx++]; //1
for (i = 0; i < 8; i++)
- byteBuf[i] = flatBytes[index++];
+ byteBuf[i] = flatBytes[idx++];
(*this)->realPrecision = bytesToDouble(byteBuf);//8
for (i = 0; i < pde_exe->SZ_SIZE_TYPE; i++)
- byteBuf[i] = flatBytes[index++];
+ byteBuf[i] = flatBytes[idx++];
(*this)->typeArray_size = bytesToSize(byteBuf, pde_exe->SZ_SIZE_TYPE);
for (i = 0; i < pde_exe->SZ_SIZE_TYPE; i++)
- byteBuf[i] = flatBytes[index++];
+ byteBuf[i] = flatBytes[idx++];
(*this)->exactDataNum = bytesToSize(byteBuf, pde_exe->SZ_SIZE_TYPE);// ST
for (i = 0; i < pde_exe->SZ_SIZE_TYPE; i++)
- byteBuf[i] = flatBytes[index++];
+ byteBuf[i] = flatBytes[idx++];
(*this)->exactMidBytes_size = bytesToSize(byteBuf, pde_exe->SZ_SIZE_TYPE);// ST
size_t logicLeadNumBitsNum = (*this)->exactDataNum * 2;
@@ -122,12 +122,12 @@ int new_TightDataPointStorageD_fromFlatBytes(TightDataPointStorageD **this, unsi
(*this)->leadNumArray_size = (logicLeadNumBitsNum >> 3) + 1;
}
- (*this)->typeArray = &flatBytes[index];
+ (*this)->typeArray = &flatBytes[idx];
//retrieve the number of states (i.e., stateNum)
(*this)->allNodes = bytesToInt_bigEndian((*this)->typeArray); //the first 4 bytes store the stateNum
(*this)->stateNum = ((*this)->allNodes+1)/2;
- index+=(*this)->typeArray_size;
+ idx+=(*this)->typeArray_size;
// todo need check length
@@ -135,15 +135,15 @@ int new_TightDataPointStorageD_fromFlatBytes(TightDataPointStorageD **this, unsi
- pde_exe->SZ_SIZE_TYPE - pde_exe->SZ_SIZE_TYPE - pde_exe->SZ_SIZE_TYPE
- (*this)->leadNumArray_size - (*this)->exactMidBytes_size - (*this)->typeArray_size;
- (*this)->leadNumArray = &flatBytes[index];
+ (*this)->leadNumArray = &flatBytes[idx];
- index+=(*this)->leadNumArray_size;
+ idx+=(*this)->leadNumArray_size;
- (*this)->exactMidBytes = &flatBytes[index];
+ (*this)->exactMidBytes = &flatBytes[idx];
- index+=(*this)->exactMidBytes_size;
+ idx+=(*this)->exactMidBytes_size;
- (*this)->residualMidBits = &flatBytes[index];
+ (*this)->residualMidBits = &flatBytes[idx];
return errorBoundMode;
diff --git a/deps/TSZ/sz/src/TightDataPointStorageF.c b/deps/TSZ/sz/src/TightDataPointStorageF.c
index cb1d79b8273d225c240548cebba3f776c7b0345c..16d524b9b44cd51db2af881b83a6a73b97f246f4 100644
--- a/deps/TSZ/sz/src/TightDataPointStorageF.c
+++ b/deps/TSZ/sz/src/TightDataPointStorageF.c
@@ -25,15 +25,15 @@ void new_TightDataPointStorageF_Empty(TightDataPointStorageF **this)
int new_TightDataPointStorageF_fromFlatBytes(TightDataPointStorageF **this, unsigned char* flatBytes, size_t flatBytesLength, sz_exedata* pde_exe, sz_params* pde_params)
{
new_TightDataPointStorageF_Empty(this);
- size_t i, index = 0;
+ size_t i, idx = 0;
//
// parse tdps
//
// 1 version(1)
- unsigned char version = flatBytes[index++]; //1
- unsigned char sameRByte = flatBytes[index++]; //1
+ unsigned char version = flatBytes[idx++]; //1
+ unsigned char sameRByte = flatBytes[idx++]; //1
// parse data format
switch (version)
@@ -51,12 +51,12 @@ int new_TightDataPointStorageF_fromFlatBytes(TightDataPointStorageF **this, unsi
pde_exe->SZ_SIZE_TYPE = ((sameRByte & 0x40)>>6)==1?8:4; //0100,0000
int errorBoundMode = SZ_ABS;
// 3 meta(2)
- convertBytesToSZParams(&(flatBytes[index]), pde_params, pde_exe);
- index += MetaDataByteLength;
+ convertBytesToSZParams(&(flatBytes[idx]), pde_params, pde_exe);
+ idx += MetaDataByteLength;
// 4 element count(4)
unsigned char dsLengthBytes[8];
for (i = 0; i < pde_exe->SZ_SIZE_TYPE; i++)
- dsLengthBytes[i] = flatBytes[index++];
+ dsLengthBytes[i] = flatBytes[idx++];
(*this)->dataSeriesLength = bytesToSize(dsLengthBytes, pde_exe->SZ_SIZE_TYPE);// 4 or 8
if((*this)->isLossless==1)
{
@@ -66,7 +66,7 @@ int new_TightDataPointStorageF_fromFlatBytes(TightDataPointStorageF **this, unsi
else if(same==1)
{
(*this)->allSameData = 1;
- (*this)->exactMidBytes = &(flatBytes[index]);
+ (*this)->exactMidBytes = &(flatBytes[idx]);
return errorBoundMode;
}
else
@@ -76,40 +76,40 @@ int new_TightDataPointStorageF_fromFlatBytes(TightDataPointStorageF **this, unsi
if(isRegression == 1)
{
(*this)->raBytes_size = flatBytesLength - 1 - 1 - MetaDataByteLength - pde_exe->SZ_SIZE_TYPE;
- (*this)->raBytes = &(flatBytes[index]);
+ (*this)->raBytes = &(flatBytes[idx]);
return errorBoundMode;
}
// 5 quant intervals(4)
unsigned char byteBuf[8];
for (i = 0; i < 4; i++)
- byteBuf[i] = flatBytes[index++];
+ byteBuf[i] = flatBytes[idx++];
int max_quant_intervals = bytesToInt_bigEndian(byteBuf);// 4
pde_params->maxRangeRadius = max_quant_intervals/2;
// 6 intervals
for (i = 0; i < 4; i++)
- byteBuf[i] = flatBytes[index++];
+ byteBuf[i] = flatBytes[idx++];
(*this)->intervals = bytesToInt_bigEndian(byteBuf);// 4
// 7 median
for (i = 0; i < 4; i++)
- byteBuf[i] = flatBytes[index++];
+ byteBuf[i] = flatBytes[idx++];
(*this)->medianValue = bytesToFloat(byteBuf); //4
// 8 reqLength
- (*this)->reqLength = flatBytes[index++]; //1
+ (*this)->reqLength = flatBytes[idx++]; //1
// 9 realPrecision(8)
for (i = 0; i < 8; i++)
- byteBuf[i] = flatBytes[index++];
+ byteBuf[i] = flatBytes[idx++];
(*this)->realPrecision = bytesToDouble(byteBuf);//8
// 10 typeArray_size
for (i = 0; i < pde_exe->SZ_SIZE_TYPE; i++)
- byteBuf[i] = flatBytes[index++];
+ byteBuf[i] = flatBytes[idx++];
(*this)->typeArray_size = bytesToSize(byteBuf, pde_exe->SZ_SIZE_TYPE);// 4
// 11 exactNum
for (i = 0; i < pde_exe->SZ_SIZE_TYPE; i++)
- byteBuf[i] = flatBytes[index++];
+ byteBuf[i] = flatBytes[idx++];
(*this)->exactDataNum = bytesToSize(byteBuf, pde_exe->SZ_SIZE_TYPE);// ST
// 12 mid size
for (i = 0; i < pde_exe->SZ_SIZE_TYPE; i++)
- byteBuf[i] = flatBytes[index++];
+ byteBuf[i] = flatBytes[idx++];
(*this)->exactMidBytes_size = bytesToSize(byteBuf, pde_exe->SZ_SIZE_TYPE);// STqq
// calc leadNumArray_size
@@ -124,20 +124,20 @@ int new_TightDataPointStorageF_fromFlatBytes(TightDataPointStorageF **this, unsi
}
// 13 typeArray
- (*this)->typeArray = &flatBytes[index];
+ (*this)->typeArray = &flatBytes[idx];
//retrieve the number of states (i.e., stateNum)
(*this)->allNodes = bytesToInt_bigEndian((*this)->typeArray); //the first 4 bytes store the stateNum
(*this)->stateNum = ((*this)->allNodes+1)/2;
- index+=(*this)->typeArray_size;
+ idx+=(*this)->typeArray_size;
// 14 leadNumArray
- (*this)->leadNumArray = &flatBytes[index];
- index += (*this)->leadNumArray_size;
+ (*this)->leadNumArray = &flatBytes[idx];
+ idx += (*this)->leadNumArray_size;
// 15 exactMidBytes
- (*this)->exactMidBytes = &flatBytes[index];
- index+=(*this)->exactMidBytes_size;
+ (*this)->exactMidBytes = &flatBytes[idx];
+ idx+=(*this)->exactMidBytes_size;
// 16 residualMidBits
- (*this)->residualMidBits = &flatBytes[index];
+ (*this)->residualMidBits = &flatBytes[idx];
// calc residualMidBits_size
(*this)->residualMidBits_size = flatBytesLength - 1 - 1 - MetaDataByteLength - pde_exe->SZ_SIZE_TYPE - 4 - 4 - 4 - 1 - 8
diff --git a/deps/cJson/src/cJSON.c b/deps/cJson/src/cJSON.c
index ff93e8730d4e9b378efaa5c9039eb886e3a30e97..53698b5baf6513aa502379272d9fe51585459110 100644
--- a/deps/cJson/src/cJSON.c
+++ b/deps/cJson/src/cJSON.c
@@ -1683,7 +1683,7 @@ CJSON_PUBLIC(int) cJSON_GetArraySize(const cJSON *array)
return (int)size;
}
-static cJSON* get_array_item(const cJSON *array, size_t index)
+static cJSON* get_array_item(const cJSON *array, size_t idx)
{
cJSON *current_child = NULL;
@@ -1693,23 +1693,23 @@ static cJSON* get_array_item(const cJSON *array, size_t index)
}
current_child = array->child;
- while ((current_child != NULL) && (index > 0))
+ while ((current_child != NULL) && (idx > 0))
{
- index--;
+ idx--;
current_child = current_child->next;
}
return current_child;
}
-CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int index)
+CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int idx)
{
- if (index < 0)
+ if (idx < 0)
{
return NULL;
}
- return get_array_item(array, (size_t)index);
+ return get_array_item(array, (size_t)idx);
}
static cJSON *get_object_item(const cJSON * const object, const char * const name, const cJSON_bool case_sensitive)
diff --git a/deps/lua/src/ldump.c b/deps/lua/src/ldump.c
index f08277d3ac440a48a3f771113729f39f492e57cc..4b205914887a78da7f20d0725d00ba4c8841b206 100644
--- a/deps/lua/src/ldump.c
+++ b/deps/lua/src/ldump.c
@@ -60,7 +60,7 @@ static void DumpVector(const void* b, int n, size_t size, DumpState* D)
static void DumpString(const TString* s, DumpState* D)
{
- if (s==NULL || getstr(s)==NULL)
+ if (s==NULL)
{
size_t size=0;
DumpVar(size,D);
diff --git a/docs/en/01-index.md b/docs/en/01-index.md
index d76c12e10fce24dff9f916945f5b6236857ebb8d..1f2f88d47d8d20e55c6a495f571bd0d11a600d74 100644
--- a/docs/en/01-index.md
+++ b/docs/en/01-index.md
@@ -6,7 +6,7 @@ slug: /
TDengine is a [high-performance](https://tdengine.com/fast), [scalable](https://tdengine.com/scalable) time series database with [SQL support](https://tdengine.com/sql-support). This document is the TDengine user manual. It introduces the basic, as well as novel concepts, in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design and other topics. It’s written mainly for architects, developers and system administrators.
-To get a global view about TDengine, like feature list, benchmarks, and competitive advantages, please browse through section [Introduction](./intro).
+To get a global view about TDengine, like feature list, benchmarks, and competitive advantages, please browse through section [Introduction](./intro). If you want to get some basics about time-series databases, please check [here](https://tdengine.com/tsdb).
TDengine greatly improves the efficiency of data ingestion, querying and storage by exploiting the characteristics of time series data, introducing the novel concepts of "one table for one data collection point" and "super table", and designing an innovative storage engine. To understand the new concepts in TDengine and make full use of the features and capabilities of TDengine, please read [“Concepts”](./concept) thoroughly.
diff --git a/docs/en/02-intro/index.md b/docs/en/02-intro/index.md
index 52b25bf949cab87f66a360f5224b3df9408c53bf..23cf1203b29c24ef92214e11f2c40b09796b4cd6 100644
--- a/docs/en/02-intro/index.md
+++ b/docs/en/02-intro/index.md
@@ -110,4 +110,6 @@ As a high-performance, scalable and SQL supported time-series database, TDengine
- [TDengine vs InfluxDB、OpenTSDB、Cassandra、MySQL、ClickHouse](https://www.tdengine.com/downloads/TDengine_Testing_Report_en.pdf)
- [TDengine vs OpenTSDB](https://tdengine.com/2019/09/12/710.html)
- [TDengine vs Cassandra](https://tdengine.com/2019/09/12/708.html)
-- [TDengine vs InfluxDB](https://tdengine.com/2019/09/12/706.html)
\ No newline at end of file
+- [TDengine vs InfluxDB](https://tdengine.com/2019/09/12/706.html)
+
+If you want to learn some basics about time-series databases, please check [here](https://tdengine.com/tsdb).
diff --git a/docs/en/07-develop/03-insert-data/05-high-volume.md b/docs/en/07-develop/03-insert-data/05-high-volume.md
new file mode 100644
index 0000000000000000000000000000000000000000..fc4f071997caeecfda6e314bd7f60278d8cf672c
--- /dev/null
+++ b/docs/en/07-develop/03-insert-data/05-high-volume.md
@@ -0,0 +1,444 @@
+---
+sidebar_label: High Performance Writing
+title: High Performance Writing
+---
+
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
+
+This chapter introduces how to write data into TDengine with high throughput.
+
+## How to achieve high performance data writing
+
+To achieve high performance writing, there are a few aspects to consider. The following sections describe these important factors.
+
+### Application Program
+
+From the perspective of the application program, you need to consider:
+
+1. The data size of each single write, also known as batch size. Generally speaking, a larger batch size yields better writing performance. However, once the batch size exceeds a certain value, you will not get any additional benefit. When using SQL to write into TDengine, it's better to put as much data as possible in a single SQL statement. The maximum SQL length supported by TDengine is 1,048,576 bytes, i.e. 1 MB. It can be configured by the parameter `maxSQLLength` on the client side, and the default value is 65,480.
+
+2. The number of concurrent connections. Normally more connections give better results. However, once the number of connections exceeds the processing capacity of the server side, performance may degrade.
+
+3. The distribution of the data to be written across tables or sub-tables. Writing to a single table in one batch is more efficient than writing to multiple tables in one batch.
+
+4. Data writing protocol.
+   - Parameter binding mode is more efficient than SQL because it doesn't have the cost of parsing SQL.
+   - Writing to known existing tables is more efficient than writing to tables whose existence is uncertain in automatic table creation mode, because the latter needs to check whether the table exists before actually writing data into it.
+   - Writing in SQL is more efficient than writing in schemaless mode, because schemaless writing creates tables automatically and may alter table schemas.
+
+Application programs need to take the above factors into account and take advantage of them. Each write batch should go to a single table (see the sketch below). The batch size needs to be tuned to a proper value for a specific system, and so does the number of concurrent connections, to achieve the best writing throughput.
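+
+The following is a minimal, illustrative sketch of the single-table batching idea above; it is not part of the sample programs. It assumes the `test.meters` super table used later in this chapter and a `TDENGINE_JDBC_URL` environment variable; the sub-table name, tag values and column values are placeholders.
+
+```java
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.Statement;
+
+public class BatchInsertSketch {
+    static final int BATCH_SIZE = 3000; // tune this value for your system
+
+    public static void main(String[] args) throws Exception {
+        String jdbcUrl = System.getenv("TDENGINE_JDBC_URL");
+        try (Connection conn = DriverManager.getConnection(jdbcUrl);
+             Statement stmt = conn.createStatement()) {
+            // Accumulate many rows for ONE sub-table into a single INSERT statement.
+            StringBuilder sql = new StringBuilder(
+                    "INSERT INTO test.d1001 USING test.meters TAGS('LosAngeles', 1) VALUES");
+            long ts = System.currentTimeMillis();
+            for (int i = 0; i < BATCH_SIZE; i++) {
+                sql.append('(').append(ts + i).append(", 10.3, 219, 0.31)");
+            }
+            stmt.executeUpdate(sql.toString()); // one round trip for the whole batch
+        }
+    }
+}
+```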
+
+### Data Source
+
+Application programs need to read data from a data source and then write it into TDengine. If one or more of the situations below applies, you need to set up message queues between the threads reading from the data source and the threads writing into TDengine (a minimal sketch follows the list below).
+
+1. There are multiple data sources, and the data generation speed of each data source is much slower than the speed of a single writing thread. In this case, the purpose of the message queues is to consolidate the data from multiple data sources to increase the batch size of a single write.
+2. The data generation speed of a single data source is much higher than the speed of a single writing thread. The purpose of the message queue in this case is to provide a buffer so that data is not lost and multiple writing threads can get data from the buffer.
+3. The data for a single table comes from multiple data sources. In this case the purpose of the message queues is to combine the data for a single table to improve write efficiency.
+
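+Below is a minimal sketch of this queue-based decoupling; it is not part of the sample programs. `readOneRowFromSource` and `writeBatchToTDengine` are hypothetical helpers, and all sizes are illustrative.
+
+```java
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.BlockingQueue;
+
+public class QueueSketch {
+    public static void main(String[] args) {
+        BlockingQueue<String> queue = new ArrayBlockingQueue<>(1_000_000);
+
+        Runnable reader = () -> {                // one reader per data source
+            try {
+                while (true) {
+                    queue.put(readOneRowFromSource()); // blocks when the queue is full
+                }
+            } catch (InterruptedException e) {
+                Thread.currentThread().interrupt();
+            }
+        };
+
+        Runnable writer = () -> {                // consolidates rows from all sources
+            List<String> batch = new ArrayList<>(3000);
+            try {
+                while (true) {
+                    batch.add(queue.take());     // wait for at least one row
+                    queue.drainTo(batch, 2999);  // then grab whatever else is queued
+                    writeBatchToTDengine(batch); // e.g. compose one INSERT statement
+                    batch.clear();
+                }
+            } catch (InterruptedException e) {
+                Thread.currentThread().interrupt();
+            }
+        };
+
+        new Thread(reader).start();
+        new Thread(reader).start();              // a second, slow data source
+        new Thread(writer).start();
+    }
+
+    static String readOneRowFromSource() { return "d1001,1690000000000,10.3,219,0.31"; }
+    static void writeBatchToTDengine(List<String> rows) { /* execute the INSERT here */ }
+}
+```
+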
+If the data source is Kafka, the application program is a Kafka consumer and can benefit from some Kafka features to achieve high performance writing:
+
+1. Put the data for a table in a single partition of a single topic so that it's easier to put the data for each table together and write in batches.
+2. Subscribe to multiple topics to accumulate data together.
+3. Add more consumers to gain more concurrency and throughput.
+4. Increase the size of a single fetch to increase the size of a write batch, as shown in the sketch below.
+
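+The following consumer settings illustrate the last point. This sketch assumes the `kafka-clients` library; the broker address, topic name and values are placeholders.
+
+```java
+import java.time.Duration;
+import java.util.Collections;
+import java.util.Properties;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+
+public class KafkaFetchSketch {
+    public static void main(String[] args) {
+        Properties props = new Properties();
+        props.put("bootstrap.servers", "localhost:9092");
+        props.put("group.id", "tdengine-writers");
+        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
+        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
+        props.put("max.poll.records", "3000");   // rows returned per poll -> batch size of one write
+        props.put("fetch.min.bytes", "1048576"); // let the broker accumulate ~1 MB before replying
+        props.put("fetch.max.wait.ms", "500");   // but never wait longer than 500 ms
+
+        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
+            consumer.subscribe(Collections.singletonList("meters"));
+            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
+            // compose one INSERT statement from `records` and write it to TDengine here
+        }
+    }
+}
+```
+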
+### Tune TDengine
+
+TDengine is a distributed, high-performance time-series database, and there are also some ways to tune TDengine to get better writing performance.
+
+1. Set a proper number of `vgroups` according to the available CPU cores. Normally, we recommend 2 \* number_of_cores as a starting point. If the verification result shows this is not enough to fully utilize CPU resources, you can use a higher value.
+2. Set proper `minTablesPerVnode`, `tableIncStepPerVnode`, and `maxVgroupsPerDb` according to the number of tables so that tables are distributed evenly across vgroups. The purpose is to balance the workload among all vnodes so that system resources are utilized better and performance is higher.
+
+For more performance tuning tips, please refer to [Performance Optimization](../../operation/optimize) and [Configuration Parameters](../../reference/config).
+
+## Sample Programs
+
+This section introduces sample programs that demonstrate how to write into TDengine with high performance.
+
+### Scenario
+
+Below is the scenario for the high performance writing sample programs.
+
+- The application program reads data from a data source; the sample program simulates a data source by generating data.
+- The speed of a single writing thread is much slower than the speed of generating data, so the program starts multiple writing threads; each thread establishes a connection to TDengine and has a message queue of fixed size.
+- The application program maps the received data to different writing threads based on table name, to make sure all the data for each table is always processed by a specific writing thread.
+- Each writing thread writes the received data into TDengine once the message queue becomes empty or the amount of read data reaches a threshold.
+
+![Scenario of high volume writing](highvolume.webp)
+
+### Sample Programs
+
+The sample programs listed in this section are based on the scenario described previously. If your scenario is different, please adjust the code based on the principles described in this chapter.
+
+The sample programs assume the source data is for different sub-tables of the same super table (meters). The super table is created before the sample programs start writing data, and sub-tables are created automatically according to the received data. If there are multiple super tables in your case, please adjust the part of the code that creates tables automatically.
+
+
+
+
+**Program Inventory**
+
+| Class | Description |
+| ---------------- | ----------------------------------------------------------------------------------------------------- |
+| FastWriteExample | Main Program |
+| ReadTask | Read data from simulated data source and put into a queue according to the hash value of table name |
+| WriteTask | Read data from queue, compose a write batch and write into TDengine |
+| MockDataSource | Generate data for some sub tables of super table meters |
+| SQLWriter | WriteTask uses this class to compose SQL, create table automatically, check SQL length and write data |
+| StmtWriter | Write in Parameter binding mode (Not finished yet) |
+| DataBaseMonitor | Calculate the writing speed and output on console every 10 seconds |
+
+Below is the complete code of the classes in the above table, along with more detailed descriptions.
+
+
+FastWriteExample
+The main program is responsible for:
+
+1. Create message queues
+2. Start writing threads
+3. Start reading threads
+4. Output the writing speed every 10 seconds
+
+The main program provides 4 parameters for tuning:
+
+1. The number of reading threads, default value is 1
+2. The number of writing threads, default value is 3
+3. The total number of tables in the generated data, default value is 1000. These tables are distributed evenly across all writing threads. If the number of tables is very big, it will take a long time to create them at the beginning.
+4. The batch size of single write, default value is 3,000
+
+The capacity of the message queue also impacts performance and can be tuned by modifying the program. Normally it's better to have a larger message queue: a larger message queue means a lower possibility of being blocked when enqueueing and higher throughput, but it also consumes more memory. The default value used in the sample programs is already big enough.
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/FastWriteExample.java}}
+```
+
+
+
+
+ReadTask
+
+ReadTask reads data from the data source. Each ReadTask is associated with a simulated data source; each data source generates data for a group of specific tables, and the data of any table is only generated from a single specific data source.
+
+ReadTask puts data into the message queue in blocking mode. That means the put operation is blocked if the message queue is full.
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/ReadTask.java}}
+```
+
+
+
+
+WriteTask
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/WriteTask.java}}
+```
+
+
+
+
+
+MockDataSource
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/MockDataSource.java}}
+```
+
+
+
+
+
+SQLWriter
+
+The SQLWriter class encapsulates the logic of composing SQL and writing data. Please note that the tables are not created in advance; they are created automatically when the "table does not exist" exception is caught. For other exceptions, the SQL statement that caused the exception is logged for you to debug.
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java}}
+```
+
+
+
+
+
+DataBaseMonitor
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java}}
+```
+
+
+
+**Steps to Launch**
+
+
+Launch Java Sample Program
+
+You need to set the environment variable `TDENGINE_JDBC_URL` before launching the program. If TDengine Server is set up on localhost, the default user name, password and port can be used, as below:
+
+```
+TDENGINE_JDBC_URL="jdbc:TAOS://localhost:6030?user=root&password=taosdata"
+```
+
+**Launch in IDE**
+
+1. Clone the TDengine repository
+ ```
+ git clone git@github.com:taosdata/TDengine.git --depth 1
+ ```
+2. Use IDE to open `docs/examples/java` directory
+3. Configure the environment variable `TDENGINE_JDBC_URL`. If you have already configured it before launching the IDE, you can skip this step.
+4. Run class `com.taos.example.highvolume.FastWriteExample`
+
+**Launch on server**
+
+If you want to launch the sample program on a remote server, please follow the steps below:
+
+1. Package the sample programs. Execute the command below under the directory `TDengine/docs/examples/java`:
+ ```
+ mvn package
+ ```
+2. Create `examples/java` directory on the server
+ ```
+ mkdir -p examples/java
+ ```
+3. Copy dependencies (the commands below assume you are working on a local Windows host and launching on a remote Linux host)
+ - Copy dependent packages
+ ```
+ scp -r .\target\lib <user>@<host>:~/examples/java
+ ```
+ - Copy the jar of sample programs
+ ```
+ scp -r .\target\javaexample-1.0.jar <user>@<host>:~/examples/java
+ ```
+4. Configure the environment variable
+ Edit `~/.bash_profile` or `~/.bashrc` and add the following:
+
+ ```
+ export TDENGINE_JDBC_URL="jdbc:TAOS://localhost:6030?user=root&password=taosdata"
+ ```
+
+ If your TDengine server is not deployed on localhost or doesn't use the default port, you need to change the above URL to the correct value for your environment.
+
+5. Launch the sample program
+
+ ```
+ java -classpath lib/*:javaexample-1.0.jar com.taos.example.highvolume.FastWriteExample
+ ```
+
+6. The sample program doesn't exit until you press CTRL + C to terminate it.
+ Below is the output from a run on a server with 16 cores, 64 GB of memory and an SSD.
+
+ ```
+ root@vm85$ java -classpath lib/*:javaexample-1.0.jar com.taos.example.highvolume.FastWriteExample 2 12
+ 18:56:35.896 [main] INFO c.t.e.highvolume.FastWriteExample - readTaskCount=2, writeTaskCount=12 tableCount=1000 maxBatchSize=3000
+ 18:56:36.011 [WriteThread-0] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.015 [WriteThread-0] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.021 [WriteThread-1] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.022 [WriteThread-1] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.031 [WriteThread-2] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.032 [WriteThread-2] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.041 [WriteThread-3] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.042 [WriteThread-3] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.093 [WriteThread-4] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.094 [WriteThread-4] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.099 [WriteThread-5] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.100 [WriteThread-5] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.100 [WriteThread-6] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.101 [WriteThread-6] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.103 [WriteThread-7] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.104 [WriteThread-7] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.105 [WriteThread-8] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.107 [WriteThread-8] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.108 [WriteThread-9] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.109 [WriteThread-9] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.156 [WriteThread-10] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.157 [WriteThread-11] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.158 [WriteThread-10] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.158 [ReadThread-0] INFO com.taos.example.highvolume.ReadTask - started
+ 18:56:36.158 [ReadThread-1] INFO com.taos.example.highvolume.ReadTask - started
+ 18:56:36.158 [WriteThread-11] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:46.369 [main] INFO c.t.e.highvolume.FastWriteExample - count=18554448 speed=1855444
+ 18:56:56.946 [main] INFO c.t.e.highvolume.FastWriteExample - count=39059660 speed=2050521
+ 18:57:07.322 [main] INFO c.t.e.highvolume.FastWriteExample - count=59403604 speed=2034394
+ 18:57:18.032 [main] INFO c.t.e.highvolume.FastWriteExample - count=80262938 speed=2085933
+ 18:57:28.432 [main] INFO c.t.e.highvolume.FastWriteExample - count=101139906 speed=2087696
+ 18:57:38.921 [main] INFO c.t.e.highvolume.FastWriteExample - count=121807202 speed=2066729
+ 18:57:49.375 [main] INFO c.t.e.highvolume.FastWriteExample - count=142952417 speed=2114521
+ 18:58:00.689 [main] INFO c.t.e.highvolume.FastWriteExample - count=163650306 speed=2069788
+ 18:58:11.646 [main] INFO c.t.e.highvolume.FastWriteExample - count=185019808 speed=2136950
+ ```
+
+
+
+
+
+
+**Program Inventory**
+
+The Python sample program uses multiple processes and cross-process message queues.
+
+| Function/Class | Description |
+| ---------------------------- | --------------------------------------------------------------------------- |
+| main Function | Program entry point, create child processes and message queues |
+| run_monitor_process Function | Create database, super table, calculate writing speed and output to console |
+| run_read_task Function | Read data and distribute to message queues |
+| MockDataSource Class | Simulate data source, return next 1,000 rows of each table |
+| run_write_task Function | Read as much data as possible from the message queue and write in batches |
+| SQLWriter Class | Write in SQL and create tables automatically |
+| StmtWriter Class | Write in parameter binding mode (not finished yet) |
+
+
+main function
+
+The `main` function is responsible for creating message queues and forking child processes. There are 3 kinds of child processes:
+
+1. Monitoring process: initializes the database and calculates the writing speed
+2. Reading process (n): reads data from the data source
+3. Writing process (m): writes data into TDengine
+
+The `main` function provides 5 parameters:
+
+1. The number of reading tasks, default value is 1
+2. The number of writing tasks, default value is 1
+3. The number of tables, default value is 1,000
+4. The capacity of message queue, default value is 1,000,000 bytes
+5. The batch size in single write, default value is 3000
+
+```python
+{{#include docs/examples/python/fast_write_example.py:main}}
+```
+
+
+
+
+run_monitor_process
+
+The monitoring process initializes the database and monitors the writing speed.
+
+```python
+{{#include docs/examples/python/fast_write_example.py:monitor}}
+```
+
+
+
+
+
+run_read_task function
+
+The reading process reads data from another data system and distributes it to the message queue allocated for it.
+
+```python
+{{#include docs/examples/python/fast_write_example.py:read}}
+```
+
+
+
+
+
+MockDataSource
+
+Below is the simulated data source; we assume the table name is included in each generated record.
+
+```python
+{{#include docs/examples/python/mockdatasource.py}}
+```
+
+
+
+
+run_write_task function
+
+The writing process tries to read as much data as possible from the message queue and writes it in batches.
+
+```python
+{{#include docs/examples/python/fast_write_example.py:write}}
+```
+
+
+
+
+
+The SQLWriter class encapsulates the logic of composing SQL and writing data. Please note that the tables are not created in advance; they are created automatically when the "table does not exist" exception is caught. For other exceptions, the SQL statement that caused the exception is logged for you to debug. This class also checks the SQL length: if the SQL length is close to `maxSQLLength`, the SQL is executed immediately. To improve writing efficiency, it's better to increase `maxSQLLength` appropriately.
+
+SQLWriter
+
+```python
+{{#include docs/examples/python/sql_writer.py}}
+```
+
+
+
+**Steps to Launch**
+
+
+
+Launch Sample Program in Python
+
+1. Prerequisites
+
+ - TDengine client driver has been installed
+ - Python 3 (version >= 3.8) has been installed
+ - TDengine Python connector `taospy` has been installed
+
+2. Install faster-fifo to replace Python's built-in multiprocessing.Queue
+
+ ```
+ pip3 install faster-fifo
+ ```
+
+3. Click the "Copy" in the above sample programs to copy `fast_write_example.py` 、 `sql_writer.py` and `mockdatasource.py`.
+
+4. Execute the program
+
+ ```
+ python3 fast_write_example.py
+ ```
+
+ Below is the output from a run on a server with 16 cores, 64 GB of memory and an SSD.
+
+ ```
+ root@vm85$ python3 fast_write_example.py 8 8
+ 2022-07-14 19:13:45,869 [root] - READ_TASK_COUNT=8, WRITE_TASK_COUNT=8, TABLE_COUNT=1000, QUEUE_SIZE=1000000, MAX_BATCH_SIZE=3000
+ 2022-07-14 19:13:48,882 [root] - WriteTask-0 started with pid 718347
+ 2022-07-14 19:13:48,883 [root] - WriteTask-1 started with pid 718348
+ 2022-07-14 19:13:48,884 [root] - WriteTask-2 started with pid 718349
+ 2022-07-14 19:13:48,884 [root] - WriteTask-3 started with pid 718350
+ 2022-07-14 19:13:48,885 [root] - WriteTask-4 started with pid 718351
+ 2022-07-14 19:13:48,885 [root] - WriteTask-5 started with pid 718352
+ 2022-07-14 19:13:48,886 [root] - WriteTask-6 started with pid 718353
+ 2022-07-14 19:13:48,886 [root] - WriteTask-7 started with pid 718354
+ 2022-07-14 19:13:48,887 [root] - ReadTask-0 started with pid 718355
+ 2022-07-14 19:13:48,888 [root] - ReadTask-1 started with pid 718356
+ 2022-07-14 19:13:48,889 [root] - ReadTask-2 started with pid 718357
+ 2022-07-14 19:13:48,889 [root] - ReadTask-3 started with pid 718358
+ 2022-07-14 19:13:48,890 [root] - ReadTask-4 started with pid 718359
+ 2022-07-14 19:13:48,891 [root] - ReadTask-5 started with pid 718361
+ 2022-07-14 19:13:48,892 [root] - ReadTask-6 started with pid 718364
+ 2022-07-14 19:13:48,893 [root] - ReadTask-7 started with pid 718365
+ 2022-07-14 19:13:56,042 [DataBaseMonitor] - count=6676310 speed=667631.0
+ 2022-07-14 19:14:06,196 [DataBaseMonitor] - count=20004310 speed=1332800.0
+ 2022-07-14 19:14:16,366 [DataBaseMonitor] - count=32290310 speed=1228600.0
+ 2022-07-14 19:14:26,527 [DataBaseMonitor] - count=44438310 speed=1214800.0
+ 2022-07-14 19:14:36,673 [DataBaseMonitor] - count=56608310 speed=1217000.0
+ 2022-07-14 19:14:46,834 [DataBaseMonitor] - count=68757310 speed=1214900.0
+ 2022-07-14 19:14:57,280 [DataBaseMonitor] - count=80992310 speed=1223500.0
+ 2022-07-14 19:15:07,689 [DataBaseMonitor] - count=93805310 speed=1281300.0
+ 2022-07-14 19:15:18,020 [DataBaseMonitor] - count=106111310 speed=1230600.0
+ 2022-07-14 19:15:28,356 [DataBaseMonitor] - count=118394310 speed=1228300.0
+ 2022-07-14 19:15:38,690 [DataBaseMonitor] - count=130742310 speed=1234800.0
+ 2022-07-14 19:15:49,000 [DataBaseMonitor] - count=143051310 speed=1230900.0
+ 2022-07-14 19:15:59,323 [DataBaseMonitor] - count=155276310 speed=1222500.0
+ 2022-07-14 19:16:09,649 [DataBaseMonitor] - count=167603310 speed=1232700.0
+ 2022-07-14 19:16:19,995 [DataBaseMonitor] - count=179976310 speed=1237300.0
+ ```
+
+
+
+:::note
+Don't establish a connection to TDengine in the parent process when using the Python connector in a multi-process setup; otherwise all the connections in the child processes will be blocked forever. This is a known issue.
+
+:::
+
+
+
diff --git a/docs/en/07-develop/03-insert-data/highvolume.webp b/docs/en/07-develop/03-insert-data/highvolume.webp
new file mode 100644
index 0000000000000000000000000000000000000000..46dfc74ae3b0043c591ff930c62251da49cae7ad
Binary files /dev/null and b/docs/en/07-develop/03-insert-data/highvolume.webp differ
diff --git a/docs/en/12-taos-sql/02-database.md b/docs/en/12-taos-sql/02-database.md
index 80581b2f1bc7ce9cd046c18873d3f22b6804d8cf..c2961d62415cd7d23b031777082801426b221190 100644
--- a/docs/en/12-taos-sql/02-database.md
+++ b/docs/en/12-taos-sql/02-database.md
@@ -32,7 +32,6 @@ CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [DAYS days] [UPDATE 1];
- cacheLast: [Description](/reference/config/#cachelast)
- replica: [Description](/reference/config/#replica)
- quorum: [Description](/reference/config/#quorum)
- - maxVgroupsPerDb: [Description](/reference/config/#maxvgroupsperdb)
- comp: [Description](/reference/config/#comp)
- precision: [Description](/reference/config/#precision)
6. Please note that all of the parameters mentioned in this section are configured in configuration file `taos.cfg` on the TDengine server. If not specified in the `create database` statement, the values from taos.cfg are used by default. To override default parameters, they must be specified in the `create database` statement.
diff --git a/docs/en/14-reference/03-connector/java.mdx b/docs/en/14-reference/03-connector/java.mdx
index ff15acf1a9c5dbfd74e6f3101459cfc7bdeda515..22f99bb9ae8fa669155ba8ac7cec1ad2c609cb32 100644
--- a/docs/en/14-reference/03-connector/java.mdx
+++ b/docs/en/14-reference/03-connector/java.mdx
@@ -91,7 +91,7 @@ Add following dependency in the `pom.xml` file of your Maven project:
You can build Java connector from source code after cloning the TDengine project:
```
-git clone https://github.com/taosdata/taos-connector-jdbc.git
+git clone https://github.com/taosdata/taos-connector-jdbc.git --branch 2.0
cd taos-connector-jdbc
mvn clean install -Dmaven.test.skip=true
```
@@ -140,34 +140,34 @@ When you use a JDBC native connection to connect to a TDengine cluster, you can
1. Do not specify hostname and port in Java applications.
- ```java
- public Connection getConn() throws Exception{
- Class.forName("com.taosdata.jdbc.TSDBDriver");
- String jdbcUrl = "jdbc:TAOS://:/test?user=root&password=taosdata";
- Properties connProps = new Properties();
- connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
- connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
- connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
- Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
- return conn;
- }
- ```
+ ```java
+ public Connection getConn() throws Exception{
+ Class.forName("com.taosdata.jdbc.TSDBDriver");
+ String jdbcUrl = "jdbc:TAOS://:/test?user=root&password=taosdata";
+ Properties connProps = new Properties();
+ connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
+ connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
+ connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
+ Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
+ return conn;
+ }
+ ```
2. specify the firstEp and the secondEp in the configuration file taos.cfg
- ```shell
- # first fully qualified domain name (FQDN) for TDengine system
- firstEp cluster_node1:6030
+ ```shell
+ # first fully qualified domain name (FQDN) for TDengine system
+ firstEp cluster_node1:6030
- # second fully qualified domain name (FQDN) for TDengine system, for cluster only
- secondEp cluster_node2:6030
+ # second fully qualified domain name (FQDN) for TDengine system, for cluster only
+ secondEp cluster_node2:6030
- # default system charset
- # charset UTF-8
+ # default system charset
+ # charset UTF-8
- # system locale
- # locale en_US.UTF-8
- ```
+ # system locale
+ # locale en_US.UTF-8
+ ```
In the above example, JDBC uses the client's configuration file to establish a connection to a hostname `cluster_node1`, port 6030, and a database named `test`. When the firstEp node in the cluster fails, JDBC attempts to connect to the cluster using secondEp.
@@ -202,6 +202,10 @@ The configuration parameters in the URL are as follows.
- batchfetch: true: pull the result set in batch when executing the query; false: pull the result set row by row. The default value is false. batchfetch uses HTTP for data transfer. The JDBC REST connection supports bulk data pulling function in taos-jdbcdriver-2.0.38 and TDengine 2.4.0.12 and later versions. taos-jdbcdriver and TDengine transfer data via WebSocket connection. Compared with HTTP, WebSocket enables JDBC REST connection to support large data volume querying and improve query performance.
- charset: specify the charset to parse the string, this parameter is valid only when set batchfetch to true.
- batchErrorIgnore: true: when executing executeBatch of Statement, if one SQL execution fails in the middle, continue to execute the following SQL. false: no longer execute any statement after the failed SQL. The default value is: false.
+- httpConnectTimeout: REST connection timeout in milliseconds, the default value is 5000 ms.
+- httpSocketTimeout: socket timeout in milliseconds, the default value is 5000 ms. It only takes effect when batchfetch is false.
+- messageWaitTimeout: message transmission timeout in milliseconds, the default value is 3000 ms. It only takes effect when batchfetch is true.
+- useSSL: connect securely using SSL. true: use an SSL connection; false: do not use an SSL connection. See the example below.
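+
+For example, a hypothetical REST connection URL carrying these timeout parameters might look like the following (host, credentials and timeout values are placeholders):
+
+```java
+String url = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata"
+        + "&batchfetch=true&httpConnectTimeout=60000&messageWaitTimeout=60000";
+Connection conn = DriverManager.getConnection(url);
+```
+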
**Note**: Some configuration items (e.g., locale, timezone) do not work in the REST connection.
@@ -257,14 +261,18 @@ In the above example, a connection is established to `taosdemo.com`, port is 603
The configuration parameters in properties are as follows.
-- TSDBDriver.PROPERTY_KEY_USER: Login TDengine user name, default value 'root'.
+- TSDBDriver.PROPERTY_KEY_USER: login TDengine user name, default value 'root'.
- TSDBDriver.PROPERTY_KEY_PASSWORD: user login password, default value 'taosdata'.
- TSDBDriver.PROPERTY_KEY_BATCH_LOAD: true: pull the result set in batch when executing query; false: pull the result set row by row. The default value is: false.
- TSDBDriver.PROPERTY_KEY_BATCH_ERROR_IGNORE: true: when executing executeBatch of Statement, if there is a SQL execution failure in the middle, continue to execute the following sq. false: no longer execute any statement after the failed SQL. The default value is: false.
-- TSDBDriver.PROPERTY_KEY_CONFIG_DIR: Only works when using JDBC native connection. Client configuration file directory path, default value `/etc/taos` on Linux OS, default value `C:/TDengine/cfg` on Windows OS.
+- TSDBDriver.PROPERTY_KEY_CONFIG_DIR: only works when using JDBC native connection. Client configuration file directory path, default value `/etc/taos` on Linux OS, default value `C:/TDengine/cfg` on Windows OS.
- TSDBDriver.PROPERTY_KEY_CHARSET: In the character set used by the client, the default value is the system character set.
- TSDBDriver.PROPERTY_KEY_LOCALE: this only takes effect when using JDBC native connection. Client language environment, the default value is system current locale.
- TSDBDriver.PROPERTY_KEY_TIME_ZONE: only takes effect when using JDBC native connection. In the time zone used by the client, the default value is the system's current time zone.
+- TSDBDriver.HTTP_CONNECT_TIMEOUT: REST connection timeout in milliseconds, the default value is 5000 ms. It only takes effect when using JDBC REST connection.
+- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket timeout in milliseconds, the default value is 5000 ms. It only takes effect when using JDBC REST connection and batchfetch is false.
+- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: message transmission timeout in milliseconds, the default value is 3000 ms. It only takes effect when using JDBC REST connection and batchfetch is true.
+- TSDBDriver.PROPERTY_KEY_USE_SSL: connect securely using SSL. true: use an SSL connection; false: do not use an SSL connection. It only takes effect when using the JDBC REST connection. A sketch of setting these properties is shown below.
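+
+The following is a sketch of setting the new timeout properties through `Properties` for a REST connection (URL and timeout values are placeholders):
+
+```java
+Properties props = new Properties();
+props.setProperty(TSDBDriver.HTTP_CONNECT_TIMEOUT, "60000");
+props.setProperty(TSDBDriver.HTTP_SOCKET_TIMEOUT, "60000");               // takes effect when batchfetch is false
+props.setProperty(TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT, "60000"); // takes effect when batchfetch is true
+Connection conn = DriverManager.getConnection(
+        "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata", props);
+```
+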
For JDBC native connections, you can specify other parameters, such as log level, SQL length, etc., by specifying URL and Properties. For more detailed configuration, please refer to [Client Configuration](/reference/config/#Client-Only).
### Priority of configuration parameters
@@ -812,11 +820,12 @@ Please refer to: [JDBC example](https://github.com/taosdata/TDengine/tree/develo
## Recent update logs
-| taos-jdbcdriver version | major changes |
-| :---------------------: | :------------------------------------------: |
-| 2.0.38 | JDBC REST connections add bulk pull function |
-| 2.0.37 | Added support for json tags |
-| 2.0.36 | Add support for schemaless writing |
+| taos-jdbcdriver version | major changes |
+| :---------------------: | :--------------------------------------------: |
+| 2.0.39 - 2.0.40 | Add REST connection/request timeout parameters |
+| 2.0.38 | JDBC REST connections add bulk pull function |
+| 2.0.37 | Support json tags |
+| 2.0.36 | Support schemaless writing |
## Frequently Asked Questions
diff --git a/docs/en/20-third-party/10-hive-mq-broker.md b/docs/en/20-third-party/10-hive-mq-broker.md
index 333e00fa0e9b724ffbb067a83ad07d0b846b1a23..64404bd63f2368494a665aed192edd3c503a65b0 100644
--- a/docs/en/20-third-party/10-hive-mq-broker.md
+++ b/docs/en/20-third-party/10-hive-mq-broker.md
@@ -3,4 +3,4 @@ sidebar_label: HiveMQ Broker
title: HiveMQ Broker writing
---
-[HiveMQ](https://www.hivemq.com/) is an MQTT broker that provides community and enterprise editions. HiveMQ is mainly for enterprise emerging machine-to-machine M2M communication and internal transport, meeting scalability, ease of management, and security features. HiveMQ provides an open-source plug-in development kit. MQTT data can be saved to TDengine via TDengine extension for HiveMQ. Please refer to the [HiveMQ extension - TDengine documentation](https://github.com/huskar-t/hivemq-tdengine-extension/blob/b62a26ecc164a310104df57691691b237e091c89/README_EN.md) for details on how to use it.
\ No newline at end of file
+[HiveMQ](https://www.hivemq.com/) is an MQTT broker that provides community and enterprise editions. HiveMQ is mainly used for enterprise and emerging machine-to-machine (M2M) communication and internal transport, meeting requirements for scalability, ease of management, and security. HiveMQ provides an open-source plug-in development kit. MQTT data can be saved to TDengine via the TDengine extension for HiveMQ. Please refer to the [HiveMQ extension - TDengine documentation](https://github.com/taosdata/hivemq-tdengine-extension/blob/master/README_EN.md) for details on how to use it.
diff --git a/docs/en/25-application/03-immigrate.md b/docs/en/25-application/03-immigrate.md
index 4d47aec1d76014ba63f6be91004abcc3934769f7..fe67f973894d460fb017de0e1a2099b8441a4abe 100644
--- a/docs/en/25-application/03-immigrate.md
+++ b/docs/en/25-application/03-immigrate.md
@@ -379,11 +379,11 @@ We still use the hypothetical environment from Chapter 4. There are three measur
### Storage resource estimation
-Assuming that the number of sensor devices that generate data and need to be stored is `n`, the frequency of data generation is `t` per second, and the length of each record is `L` bytes, the scale of data generated per day is `n * t * L` bytes. Assuming the compression ratio is `C`, the daily data size is `(n * t * L)/C` bytes. The storage resources are estimated to accommodate the data scale for 1.5 years. In the production environment, the compression ratio C of TDengine is generally between 5 and 7.
+Assuming that the number of sensor devices that generate data and need to be stored is `n`, the frequency of data generation is `t` per second, and the length of each record is `L` bytes, the scale of data generated per day is `86400 * n * t * L` bytes. Assuming the compression ratio is `C`, the daily data size is `(86400 * n * t * L)/C` bytes. The storage resources are estimated to accommodate the data scale for 1.5 years. In the production environment, the compression ratio C of TDengine is generally between 5 and 7.
With additional 20% redundancy, you can calculate the required storage resources:
```matlab
-(n * t * L) * (365 * 1.5) * (1+20%)/C
+(86400 * n * t * L) * (365 * 1.5) * (1+20%)/C
````
Substituting in the above formula, the raw data generated every year is 11.8TB without considering the label information. Note that tag information is associated with each timeline in TDengine, not every record. The amount of data to be recorded is somewhat reduced relative to the generated data, and label data can be ignored as a whole. Assuming a compression ratio of 5, the size of the retained data ends up being 2.56 TB.
diff --git a/docs/examples/java/pom.xml b/docs/examples/java/pom.xml
index a48ba398da92f401235819d067aa2ba6f8b173ea..77c6a3ad60135a023ee1e72c2220e904c1f6313f 100644
--- a/docs/examples/java/pom.xml
+++ b/docs/examples/java/pom.xml
@@ -24,6 +24,16 @@
            <version>2.0.38</version>
        </dependency>
+        <dependency>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-api</artifactId>
+            <version>1.7.36</version>
+        </dependency>
+        <dependency>
+            <groupId>ch.qos.logback</groupId>
+            <artifactId>logback-classic</artifactId>
+            <version>1.2.11</version>
+        </dependency>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
@@ -31,5 +41,36 @@
            <scope>test</scope>
        </dependency>
    </dependencies>
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-surefire-plugin</artifactId>
+                <version>2.5</version>
+                <configuration>
+                    <skipTests>true</skipTests>
+                </configuration>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-dependency-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <id>copy-dependencies</id>
+                        <phase>prepare-package</phase>
+                        <goals>
+                            <goal>copy-dependencies</goal>
+                        </goals>
+                        <configuration>
+                            <outputDirectory>${project.build.directory}/lib</outputDirectory>
+                            <overWriteReleases>false</overWriteReleases>
+                            <overWriteSnapshots>false</overWriteSnapshots>
+                            <overWriteIfNewer>true</overWriteIfNewer>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
+    </build>
</project>
diff --git a/docs/examples/java/src/main/java/com/taos/example/TestTableNotExits.java b/docs/examples/java/src/main/java/com/taos/example/TestTableNotExits.java
new file mode 100644
index 0000000000000000000000000000000000000000..89fa8eaed5f7fa90bb56e21c7427a9f12fb8fa4e
--- /dev/null
+++ b/docs/examples/java/src/main/java/com/taos/example/TestTableNotExits.java
@@ -0,0 +1,26 @@
+package com.taos.example;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.sql.Statement;
+
+public class TestTableNotExits {
+ private static Connection getConnection() throws SQLException {
+ String jdbcUrl = "jdbc:TAOS://localhost:6030?user=root&password=taosdata";
+ return DriverManager.getConnection(jdbcUrl);
+ }
+ public static void main(String[] args) throws SQLException {
+ try(Connection conn = getConnection()) {
+ try(Statement stmt = conn.createStatement()) {
+ try {
+ stmt.executeUpdate("insert into test.t1 values(1, 2) test.t2 values(3, 4)");
+ } catch (SQLException e) {
+ System.out.println(e.getErrorCode());
+ System.out.println(Integer.toHexString(e.getErrorCode()));
+ System.out.println(e);
+ }
+ }
+ }
+ }
+}
diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java
new file mode 100644
index 0000000000000000000000000000000000000000..04b149a4b96441ecfd1b0bdde54c9ed71349cab2
--- /dev/null
+++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java
@@ -0,0 +1,63 @@
+package com.taos.example.highvolume;
+
+import java.sql.*;
+
+/**
+ * Prepare target database.
+ * Count total records in database periodically so that we can estimate the writing speed.
+ */
+public class DataBaseMonitor {
+ private Connection conn;
+ private Statement stmt;
+
+ public DataBaseMonitor init() throws SQLException {
+ if (conn == null) {
+ String jdbcURL = System.getenv("TDENGINE_JDBC_URL");
+ conn = DriverManager.getConnection(jdbcURL);
+ stmt = conn.createStatement();
+ }
+ return this;
+ }
+
+ public void close() {
+ try {
+ stmt.close();
+ } catch (SQLException e) {
+ }
+ try {
+ conn.close();
+ } catch (SQLException e) {
+ }
+ }
+
+ public void prepareDatabase() throws SQLException {
+ stmt.execute("DROP DATABASE IF EXISTS test");
+ stmt.execute("CREATE DATABASE test");
+ stmt.execute("CREATE STABLE test.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)");
+ }
+
+ public Long count() throws SQLException {
+ if (!stmt.isClosed()) {
+ ResultSet result = stmt.executeQuery("SELECT count(*) from test.meters");
+ result.next();
+ return result.getLong(1);
+ }
+ return null;
+ }
+
+ /**
+ * show test.stables;
+ *
+ * name | created_time | columns | tags | tables |
+ * ============================================================================================
+ * meters | 2022-07-20 08:39:30.902 | 4 | 2 | 620000 |
+ */
+ public Long getTableCount() throws SQLException {
+ if (!stmt.isClosed()) {
+ ResultSet result = stmt.executeQuery("show test.stables");
+ result.next();
+ return result.getLong(5);
+ }
+ return null;
+ }
+}
\ No newline at end of file
diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/FastWriteExample.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/FastWriteExample.java
new file mode 100644
index 0000000000000000000000000000000000000000..41b59551ca69a4056c2f2b572d169bd08dc4fcfe
--- /dev/null
+++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/FastWriteExample.java
@@ -0,0 +1,70 @@
+package com.taos.example.highvolume;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.sql.*;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.BlockingQueue;
+
+
+public class FastWriteExample {
+ final static Logger logger = LoggerFactory.getLogger(FastWriteExample.class);
+
+ final static int taskQueueCapacity = 1000000;
+ final static List<BlockingQueue<String>> taskQueues = new ArrayList<>();
+ final static List<ReadTask> readTasks = new ArrayList<>();
+ final static List<WriteTask> writeTasks = new ArrayList<>();
+ final static DataBaseMonitor databaseMonitor = new DataBaseMonitor();
+
+ public static void stopAll() {
+ logger.info("shutting down");
+ readTasks.forEach(task -> task.stop());
+ writeTasks.forEach(task -> task.stop());
+ databaseMonitor.close();
+ }
+
+ public static void main(String[] args) throws InterruptedException, SQLException {
+ int readTaskCount = args.length > 0 ? Integer.parseInt(args[0]) : 1;
+ int writeTaskCount = args.length > 1 ? Integer.parseInt(args[1]) : 3;
+ int tableCount = args.length > 2 ? Integer.parseInt(args[2]) : 1000;
+ int maxBatchSize = args.length > 3 ? Integer.parseInt(args[3]) : 3000;
+
+ logger.info("readTaskCount={}, writeTaskCount={} tableCount={} maxBatchSize={}",
+ readTaskCount, writeTaskCount, tableCount, maxBatchSize);
+
+ databaseMonitor.init().prepareDatabase();
+
+ // Create task queues, writing tasks and start writing threads.
+ for (int i = 0; i < writeTaskCount; ++i) {
+ BlockingQueue<String> queue = new ArrayBlockingQueue<>(taskQueueCapacity);
+ taskQueues.add(queue);
+ WriteTask task = new WriteTask(queue, maxBatchSize);
+ Thread t = new Thread(task);
+ t.setName("WriteThread-" + i);
+ t.start();
+ }
+
+ // create reading tasks and start reading threads
+ int tableCountPerTask = tableCount / readTaskCount;
+ for (int i = 0; i < readTaskCount; ++i) {
+ ReadTask task = new ReadTask(i, taskQueues, tableCountPerTask);
+ Thread t = new Thread(task);
+ t.setName("ReadThread-" + i);
+ t.start();
+ }
+
+ Runtime.getRuntime().addShutdownHook(new Thread(FastWriteExample::stopAll));
+
+ long lastCount = 0;
+ while (true) {
+ Thread.sleep(10000);
+ long numberOfTable = databaseMonitor.getTableCount();
+ long count = databaseMonitor.count();
+ logger.info("numberOfTable={} count={} speed={}", numberOfTable, count, (count - lastCount) / 10);
+ lastCount = count;
+ }
+ }
+}
\ No newline at end of file
diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/MockDataSource.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/MockDataSource.java
new file mode 100644
index 0000000000000000000000000000000000000000..6fe83f002ebcb9d82e026e9a32886fd22bfefbe9
--- /dev/null
+++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/MockDataSource.java
@@ -0,0 +1,53 @@
+package com.taos.example.highvolume;
+
+import java.util.Iterator;
+
+/**
+ * Generate test data
+ */
+class MockDataSource implements Iterator<String> {
+ private String tbNamePrefix;
+ private int tableCount;
+ private long maxRowsPerTable = 1000000000L;
+
+ // 100 milliseconds between two neighbouring rows.
+ long startMs = System.currentTimeMillis() - maxRowsPerTable * 100;
+ private int currentRow = 0;
+ private int currentTbId = -1;
+
+ // mock values
+ String[] location = {"LosAngeles", "SanDiego", "Hollywood", "Compton", "San Francisco"};
+ float[] current = {8.8f, 10.7f, 9.9f, 8.9f, 9.4f};
+ int[] voltage = {119, 116, 111, 113, 118};
+ float[] phase = {0.32f, 0.34f, 0.33f, 0.329f, 0.141f};
+
+ public MockDataSource(String tbNamePrefix, int tableCount) {
+ this.tbNamePrefix = tbNamePrefix;
+ this.tableCount = tableCount;
+ }
+
+ @Override
+ public boolean hasNext() {
+ currentTbId += 1;
+ if (currentTbId == tableCount) {
+ currentTbId = 0;
+ currentRow += 1;
+ }
+ return currentRow < maxRowsPerTable;
+ }
+
+ @Override
+ public String next() {
+ long ts = startMs + 100 * currentRow;
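+        // derive a tag value from the table id: tables are grouped roughly five per group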
+ int groupId = currentTbId % 5 == 0 ? currentTbId / 5 : currentTbId / 5 + 1;
+ StringBuilder sb = new StringBuilder(tbNamePrefix + "_" + currentTbId + ","); // tbName
+ sb.append(ts).append(','); // ts
+ sb.append(current[currentRow % 5]).append(','); // current
+ sb.append(voltage[currentRow % 5]).append(','); // voltage
+ sb.append(phase[currentRow % 5]).append(','); // phase
+ sb.append(location[currentRow % 5]).append(','); // location
+ sb.append(groupId); // groupID
+
+ return sb.toString();
+ }
+}
\ No newline at end of file
diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/ReadTask.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/ReadTask.java
new file mode 100644
index 0000000000000000000000000000000000000000..a6fcfed1d28281d46aff493ef9783972858ebe62
--- /dev/null
+++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/ReadTask.java
@@ -0,0 +1,58 @@
+package com.taos.example.highvolume;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.BlockingQueue;
+
+class ReadTask implements Runnable {
+ private final static Logger logger = LoggerFactory.getLogger(ReadTask.class);
+ private final int taskId;
+    private final List<BlockingQueue<String>> taskQueues;
+ private final int queueCount;
+ private final int tableCount;
+ private boolean active = true;
+
+    public ReadTask(int readTaskId, List<BlockingQueue<String>> queues, int tableCount) {
+ this.taskId = readTaskId;
+ this.taskQueues = queues;
+ this.queueCount = queues.size();
+ this.tableCount = tableCount;
+ }
+
+ /**
+     * Assign received data to different queues.
+     * Here we use the suffix number in the table name.
+     * In practice you are expected to define your own routing rule.
+ *
+ * @param line record received
+ * @return which queue to use
+ */
+ public int getQueueId(String line) {
+ String tbName = line.substring(0, line.indexOf(',')); // For example: tb1_101
+ String suffixNumber = tbName.split("_")[1];
+ return Integer.parseInt(suffixNumber) % this.queueCount;
+ }
+
+ @Override
+ public void run() {
+ logger.info("started");
+        Iterator<String> it = new MockDataSource("tb" + this.taskId, tableCount);
+ try {
+ while (it.hasNext() && active) {
+ String line = it.next();
+ int queueId = getQueueId(line);
+ taskQueues.get(queueId).put(line);
+ }
+ } catch (Exception e) {
+ logger.error("Read Task Error", e);
+ }
+ }
+
+ public void stop() {
+ logger.info("stop");
+ this.active = false;
+ }
+}
\ No newline at end of file
diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java
new file mode 100644
index 0000000000000000000000000000000000000000..c2989acdbe3d0f56d7451ac86051a55955ce14de
--- /dev/null
+++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java
@@ -0,0 +1,205 @@
+package com.taos.example.highvolume;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.sql.*;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * A helper class that encapsulates the logic of writing to TDengine using SQL.
+ *
+ * The main interface consists of two methods:
+ *
+ * {@link SQLWriter#processLine}, which receives raw lines from WriteTask and groups them by table name.
+ * {@link SQLWriter#flush}, which assembles the INSERT statement and executes it.
+ *
+ *
+ * One technique is worth mentioning: tables are created on demand when a "table does not exist" error occurs, instead of relying on the automatic table creation syntax "INSERT INTO tb USING stb".
+ * This ensures that checking for table existence is a one-time-only operation.
+ *
+ *
+ *
+ */
+public class SQLWriter {
+ final static Logger logger = LoggerFactory.getLogger(SQLWriter.class);
+
+ private Connection conn;
+ private Statement stmt;
+
+ /**
+ * current number of buffered records
+ */
+ private int bufferedCount = 0;
+ /**
+ * Maximum number of buffered records.
+     * A flush is triggered when bufferedCount reaches this value.
+ */
+ private int maxBatchSize;
+
+
+ /**
+ * Maximum SQL length.
+ */
+ private int maxSQLLength;
+
+ /**
+ * Map from table name to column values. For example:
+ * "tb001" -> "(1648432611249,2.1,114,0.09) (1648432611250,2.2,135,0.2)"
+ */
+    private Map<String, String> tbValues = new HashMap<>();
+
+ /**
+ * Map from table name to tag values in the same order as creating stable.
+ * Used for creating table.
+ */
+    private Map<String, String> tbTags = new HashMap<>();
+
+ public SQLWriter(int maxBatchSize) {
+ this.maxBatchSize = maxBatchSize;
+ }
+
+
+ /**
+ * Get Database Connection
+ *
+ * @return Connection
+ * @throws SQLException
+ */
+ private static Connection getConnection() throws SQLException {
+ String jdbcURL = System.getenv("TDENGINE_JDBC_URL");
+ return DriverManager.getConnection(jdbcURL);
+ }
+
+ /**
+ * Create Connection and Statement
+ *
+ * @throws SQLException
+ */
+ public void init() throws SQLException {
+ conn = getConnection();
+ stmt = conn.createStatement();
+ stmt.execute("use test");
+ ResultSet rs = stmt.executeQuery("show variables");
+ while (rs.next()) {
+ String configName = rs.getString(1);
+ if ("maxSQLLength".equals(configName)) {
+ maxSQLLength = Integer.parseInt(rs.getString(2));
+ logger.info("maxSQLLength={}", maxSQLLength);
+ }
+ }
+ }
+
+ /**
+ * Convert raw data to SQL fragments, group them by table name and cache them in a HashMap.
+     * Writing is triggered when the number of buffered records reaches maxBatchSize.
+     *
+     * @param line raw data taken from the task queue, in the format: tbName,ts,current,voltage,phase,location,groupId
+ */
+ public void processLine(String line) throws SQLException {
+ bufferedCount += 1;
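+        // the column values are everything between the first comma and the second-to-last comma;
+        // the last two fields (location, groupId) are tag values used only when the table has to be created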
+ int firstComma = line.indexOf(',');
+ String tbName = line.substring(0, firstComma);
+ int lastComma = line.lastIndexOf(',');
+ int secondLastComma = line.lastIndexOf(',', lastComma - 1);
+ String value = "(" + line.substring(firstComma + 1, secondLastComma) + ") ";
+ if (tbValues.containsKey(tbName)) {
+ tbValues.put(tbName, tbValues.get(tbName) + value);
+ } else {
+ tbValues.put(tbName, value);
+ }
+ if (!tbTags.containsKey(tbName)) {
+ String location = line.substring(secondLastComma + 1, lastComma);
+ String groupId = line.substring(lastComma + 1);
+ String tagValues = "('" + location + "'," + groupId + ')';
+ tbTags.put(tbName, tagValues);
+ }
+ if (bufferedCount == maxBatchSize) {
+ flush();
+ }
+ }
+
+
+ /**
+ * Assemble INSERT statement using buffered SQL fragments in Map {@link SQLWriter#tbValues} and execute it.
+     * In case of a "Table does not exist" exception, create all tables referenced in the SQL and retry it.
+ */
+ public void flush() throws SQLException {
+ StringBuilder sb = new StringBuilder("INSERT INTO ");
+        for (Map.Entry<String, String> entry : tbValues.entrySet()) {
+ String tableName = entry.getKey();
+ String values = entry.getValue();
+ String q = tableName + " values " + values + " ";
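+            // flush early if appending this fragment would exceed the maximum SQL length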
+ if (sb.length() + q.length() > maxSQLLength) {
+ executeSQL(sb.toString());
+ logger.warn("increase maxSQLLength or decrease maxBatchSize to gain better performance");
+ sb = new StringBuilder("INSERT INTO ");
+ }
+ sb.append(q);
+ }
+ executeSQL(sb.toString());
+ tbValues.clear();
+ bufferedCount = 0;
+ }
+
+ private void executeSQL(String sql) throws SQLException {
+ try {
+ stmt.executeUpdate(sql);
+ } catch (SQLException e) {
+ // convert to error code defined in taoserror.h
+ int errorCode = e.getErrorCode() & 0xffff;
+ if (errorCode == 0x362 || errorCode == 0x218) {
+ // Table does not exist
+ createTables();
+ executeSQL(sql);
+ } else {
+ logger.error("Execute SQL: {}", sql);
+ throw e;
+ }
+ } catch (Throwable throwable) {
+ logger.error("Execute SQL: {}", sql);
+ throw throwable;
+ }
+ }
+
+ /**
+ * Create tables in batch using syntax:
+ *
+ * CREATE TABLE [IF NOT EXISTS] tb_name1 USING stb_name TAGS (tag_value1, ...) [IF NOT EXISTS] tb_name2 USING stb_name TAGS (tag_value2, ...) ...;
+ *
+ */
+ private void createTables() throws SQLException {
+ StringBuilder sb = new StringBuilder("CREATE TABLE ");
+ for (String tbName : tbValues.keySet()) {
+ String tagValues = tbTags.get(tbName);
+ sb.append("IF NOT EXISTS ").append(tbName).append(" USING meters TAGS ").append(tagValues).append(" ");
+ }
+ String sql = sb.toString();
+ try {
+ stmt.executeUpdate(sql);
+ } catch (Throwable throwable) {
+ logger.error("Execute SQL: {}", sql);
+ throw throwable;
+ }
+ }
+
+ public boolean hasBufferedValues() {
+ return bufferedCount > 0;
+ }
+
+ public int getBufferedCount() {
+ return bufferedCount;
+ }
+
+ public void close() {
+ try {
+ stmt.close();
+ } catch (SQLException e) {
+ }
+ try {
+ conn.close();
+ } catch (SQLException e) {
+ }
+ }
+}
\ No newline at end of file
diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/StmtWriter.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/StmtWriter.java
new file mode 100644
index 0000000000000000000000000000000000000000..8ade06625d708a112c85d5657aa00bcd0e605ff4
--- /dev/null
+++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/StmtWriter.java
@@ -0,0 +1,4 @@
+package com.taos.example.highvolume;
+
+public class StmtWriter {
+}
diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/WriteTask.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/WriteTask.java
new file mode 100644
index 0000000000000000000000000000000000000000..de9e5463d7dc59478f991e4783aacaae527b4c4b
--- /dev/null
+++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/WriteTask.java
@@ -0,0 +1,58 @@
+package com.taos.example.highvolume;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.BlockingQueue;
+
+class WriteTask implements Runnable {
+ private final static Logger logger = LoggerFactory.getLogger(WriteTask.class);
+ private final int maxBatchSize;
+
+    // the queue from which this write task gets raw data.
+    private final BlockingQueue<String> queue;
+
+    // A flag indicating whether to continue.
+ private boolean active = true;
+
+    public WriteTask(BlockingQueue<String> taskQueue, int maxBatchSize) {
+ this.queue = taskQueue;
+ this.maxBatchSize = maxBatchSize;
+ }
+
+ @Override
+ public void run() {
+ logger.info("started");
+        String line = null; // the line most recently taken from the queue.
+ SQLWriter writer = new SQLWriter(maxBatchSize);
+ try {
+ writer.init();
+ while (active) {
+ line = queue.poll();
+ if (line != null) {
+ // parse raw data and buffer the data.
+ writer.processLine(line);
+ } else if (writer.hasBufferedValues()) {
+ // write data immediately if no more data in the queue
+ writer.flush();
+ } else {
+                    // sleep a while to avoid high CPU usage when the queue is empty and there are no buffered records.
+ Thread.sleep(100);
+ }
+ }
+ if (writer.hasBufferedValues()) {
+ writer.flush();
+ }
+ } catch (Exception e) {
+ String msg = String.format("line=%s, bufferedCount=%s", line, writer.getBufferedCount());
+ logger.error(msg, e);
+ } finally {
+ writer.close();
+ }
+ }
+
+ public void stop() {
+ logger.info("stop");
+ this.active = false;
+ }
+}
\ No newline at end of file
diff --git a/docs/examples/java/src/main/resources/highvolume.drawio b/docs/examples/java/src/main/resources/highvolume.drawio
new file mode 100644
index 0000000000000000000000000000000000000000..410216061813d307b9e8cc289fe58df05c01e390
--- /dev/null
+++ b/docs/examples/java/src/main/resources/highvolume.drawio
@@ -0,0 +1,72 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/examples/java/src/main/resources/highvolume2.drawio b/docs/examples/java/src/main/resources/highvolume2.drawio
new file mode 100644
index 0000000000000000000000000000000000000000..8c9ae090071d93574e98305d3c8e458539a6b50d
--- /dev/null
+++ b/docs/examples/java/src/main/resources/highvolume2.drawio
@@ -0,0 +1,76 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/examples/java/src/main/resources/logback.xml b/docs/examples/java/src/main/resources/logback.xml
new file mode 100644
index 0000000000000000000000000000000000000000..15c6d77de733f650f3f91cb2a3163a563dbcf90f
--- /dev/null
+++ b/docs/examples/java/src/main/resources/logback.xml
@@ -0,0 +1,22 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<configuration>
+    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+        <encoder>
+            <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
+        </encoder>
+    </appender>
+    <appender name="FILE" class="ch.qos.logback.core.FileAppender">
+        <file>demo.log</file>
+        <append>true</append>
+        <encoder>
+            <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
+        </encoder>
+    </appender>
+    <root level="info">
+        <appender-ref ref="STDOUT"/>
+        <appender-ref ref="FILE"/>
+    </root>
+</configuration>
\ No newline at end of file
diff --git a/docs/examples/python/fast_write_example.py b/docs/examples/python/fast_write_example.py
new file mode 100644
index 0000000000000000000000000000000000000000..c9d606388fdecd85f1468f24cc497ecc5941f035
--- /dev/null
+++ b/docs/examples/python/fast_write_example.py
@@ -0,0 +1,180 @@
+# install dependencies:
+# recommend python >= 3.8
+# pip3 install faster-fifo
+#
+
+import logging
+import math
+import sys
+import time
+import os
+from multiprocessing import Process
+from faster_fifo import Queue
+from mockdatasource import MockDataSource
+from queue import Empty
+from typing import List
+
+logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format="%(asctime)s [%(name)s] - %(message)s")
+
+READ_TASK_COUNT = 1
+WRITE_TASK_COUNT = 1
+TABLE_COUNT = 1000
+QUEUE_SIZE = 1000000
+MAX_BATCH_SIZE = 3000
+
+read_processes = []
+write_processes = []
+
+
+def get_connection():
+ """
+ If variable TDENGINE_FIRST_EP is provided then it will be used. If not, firstEP in /etc/taos/taos.cfg will be used.
+    You can also override the default username and password by supplying the variables TDENGINE_USER and TDENGINE_PASSWORD
+ """
+ import taos
+ firstEP = os.environ.get("TDENGINE_FIRST_EP")
+ if firstEP:
+ host, port = firstEP.split(":")
+ else:
+ host, port = None, 0
+ user = os.environ.get("TDENGINE_USER", "root")
+ password = os.environ.get("TDENGINE_PASSWORD", "taosdata")
+ return taos.connect(host=host, port=int(port), user=user, password=password)
+
+
+# ANCHOR: read
+
+def run_read_task(task_id: int, task_queues: List[Queue]):
+ table_count_per_task = TABLE_COUNT // READ_TASK_COUNT
+ data_source = MockDataSource(f"tb{task_id}", table_count_per_task)
+ try:
+ for batch in data_source:
+ for table_id, rows in batch:
+ # hash data to different queue
+ i = table_id % len(task_queues)
+                # when the queue is full, block until space becomes available
+ task_queues[i].put_many(rows, block=True, timeout=-1)
+ except KeyboardInterrupt:
+ pass
+
+
+# ANCHOR_END: read
+
+# ANCHOR: write
+def run_write_task(task_id: int, queue: Queue):
+ from sql_writer import SQLWriter
+ log = logging.getLogger(f"WriteTask-{task_id}")
+ writer = SQLWriter(get_connection)
+ lines = None
+ try:
+ while True:
+ try:
+ # get as many as possible
+ lines = queue.get_many(block=False, max_messages_to_get=MAX_BATCH_SIZE)
+ writer.process_lines(lines)
+ except Empty:
+ time.sleep(0.01)
+ except KeyboardInterrupt:
+ pass
+ except BaseException as e:
+ log.debug(f"lines={lines}")
+ raise e
+
+
+# ANCHOR_END: write
+
+def set_global_config():
+ argc = len(sys.argv)
+ if argc > 1:
+ global READ_TASK_COUNT
+ READ_TASK_COUNT = int(sys.argv[1])
+ if argc > 2:
+ global WRITE_TASK_COUNT
+ WRITE_TASK_COUNT = int(sys.argv[2])
+ if argc > 3:
+ global TABLE_COUNT
+ TABLE_COUNT = int(sys.argv[3])
+ if argc > 4:
+ global QUEUE_SIZE
+ QUEUE_SIZE = int(sys.argv[4])
+ if argc > 5:
+ global MAX_BATCH_SIZE
+ MAX_BATCH_SIZE = int(sys.argv[5])
+
+
+# ANCHOR: monitor
+def run_monitor_process():
+ log = logging.getLogger("DataBaseMonitor")
+ conn = get_connection()
+ conn.execute("DROP DATABASE IF EXISTS test")
+ conn.execute("CREATE DATABASE test")
+ conn.execute("CREATE STABLE test.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) "
+ "TAGS (location BINARY(64), groupId INT)")
+
+ def get_count():
+ res = conn.query("SELECT count(*) FROM test.meters")
+ rows = res.fetch_all()
+ return rows[0][0] if rows else 0
+
+ last_count = 0
+ while True:
+ time.sleep(10)
+ count = get_count()
+ log.info(f"count={count} speed={(count - last_count) / 10}")
+ last_count = count
+
+
+# ANCHOR_END: monitor
+# ANCHOR: main
+def main():
+ set_global_config()
+ logging.info(f"READ_TASK_COUNT={READ_TASK_COUNT}, WRITE_TASK_COUNT={WRITE_TASK_COUNT}, "
+ f"TABLE_COUNT={TABLE_COUNT}, QUEUE_SIZE={QUEUE_SIZE}, MAX_BATCH_SIZE={MAX_BATCH_SIZE}")
+
+ monitor_process = Process(target=run_monitor_process)
+ monitor_process.start()
+    time.sleep(3)  # wait for the database to be ready
+
+ task_queues: List[Queue] = []
+ # create task queues
+ for i in range(WRITE_TASK_COUNT):
+ queue = Queue(max_size_bytes=QUEUE_SIZE)
+ task_queues.append(queue)
+
+ # create write processes
+ for i in range(WRITE_TASK_COUNT):
+ p = Process(target=run_write_task, args=(i, task_queues[i]))
+ p.start()
+ logging.debug(f"WriteTask-{i} started with pid {p.pid}")
+ write_processes.append(p)
+
+ # create read processes
+ for i in range(READ_TASK_COUNT):
+ queues = assign_queues(i, task_queues)
+ p = Process(target=run_read_task, args=(i, queues))
+ p.start()
+ logging.debug(f"ReadTask-{i} started with pid {p.pid}")
+ read_processes.append(p)
+
+ try:
+ monitor_process.join()
+ except KeyboardInterrupt:
+ monitor_process.terminate()
+ [p.terminate() for p in read_processes]
+ [p.terminate() for p in write_processes]
+ [q.close() for q in task_queues]
+
+
+def assign_queues(read_task_id, task_queues):
+ """
+ Compute target queues for a specific read task.
+ """
+ ratio = WRITE_TASK_COUNT / READ_TASK_COUNT
+ from_index = math.floor(read_task_id * ratio)
+ end_index = math.ceil((read_task_id + 1) * ratio)
+ return task_queues[from_index:end_index]
+
+
+if __name__ == '__main__':
+ main()
+# ANCHOR_END: main
diff --git a/docs/examples/python/highvolume_faster_queue.py b/docs/examples/python/highvolume_faster_queue.py
new file mode 100644
index 0000000000000000000000000000000000000000..14aebc67eee5a0701081f2f5da605184568c3a89
--- /dev/null
+++ b/docs/examples/python/highvolume_faster_queue.py
@@ -0,0 +1,205 @@
+# install dependencies:
+# recommend python >= 3.8
+# pip3 install faster-fifo
+#
+
+import logging
+import sys
+import time
+import os
+from multiprocessing import Process
+from faster_fifo import Queue
+from queue import Empty
+from typing import List
+
+logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format="%(asctime)s [%(name)s] - %(message)s")
+
+READ_TASK_COUNT = 1
+WRITE_TASK_COUNT = 1
+TABLE_COUNT = 1000
+QUEUE_SIZE = 1000000
+MAX_BATCH_SIZE = 3000
+
+read_processes = []
+write_processes = []
+
+
+def get_connection():
+ """
+ If variable TDENGINE_FIRST_EP is provided then it will be used. If not, firstEP in /etc/taos/taos.cfg will be used.
+    You can also override the default username and password by supplying the variables TDENGINE_USER and TDENGINE_PASSWORD
+ """
+ import taos
+ firstEP = os.environ.get("TDENGINE_FIRST_EP")
+ if firstEP:
+ host, port = firstEP.split(":")
+ else:
+ host, port = None, 0
+ user = os.environ.get("TDENGINE_USER", "root")
+ password = os.environ.get("TDENGINE_PASSWORD", "taosdata")
+ return taos.connect(host=host, port=int(port), user=user, password=password)
+
+
+# ANCHOR: MockDataSource
+class MockDataSource:
+ location = ["LosAngeles", "SanDiego", "Hollywood", "Compton", "San Francisco"]
+ current = [8.8, 10.7, 9.9, 8.9, 9.4]
+ voltage = [119, 116, 111, 113, 118]
+ phase = [0.32, 0.34, 0.33, 0.329, 0.141]
+ max_rows_per_table = 10 ** 9
+
+ def __init__(self, tb_name_prefix, table_count):
+ self.table_name_prefix = tb_name_prefix
+ self.table_count = table_count
+ self.start_ms = round(time.time() * 1000) - self.max_rows_per_table * 100
+
+ def __iter__(self):
+ self.row = 0
+ self.table_id = -1
+ return self
+
+ def __next__(self):
+ """
+ next 100 rows of current table
+ """
+ self.table_id += 1
+ if self.table_id == self.table_count:
+ self.table_id = 0
+ if self.row >= self.max_rows_per_table:
+ raise StopIteration
+ rows = []
+
+ while len(rows) < 100:
+ self.row += 1
+ ts = self.start_ms + 100 * self.row
+ group_id = self.table_id % 5 if self.table_id % 5 == 0 else self.table_id % 5 + 1
+ tb_name = self.table_name_prefix + '_' + str(self.table_id)
+ ri = self.row % 5
+ rows.append(f"{tb_name},{ts},{self.current[ri]},{self.voltage[ri]},{self.phase[ri]},{self.location[ri]},{group_id}")
+ return self.table_id, rows
+
+
+# ANCHOR_END: MockDataSource
+
+# ANCHOR: read
+def run_read_task(task_id: int, task_queues: List[Queue]):
+ table_count_per_task = TABLE_COUNT // READ_TASK_COUNT
+ data_source = MockDataSource(f"tb{task_id}", table_count_per_task)
+ try:
+ for table_id, rows in data_source:
+ # hash data to different queue
+ i = table_id % len(task_queues)
+            # when the queue is full, block until space becomes available
+ task_queues[i].put_many(rows, block=True, timeout=-1)
+ except KeyboardInterrupt:
+ pass
+
+
+# ANCHOR_END: read
+
+# ANCHOR: write
+def run_write_task(task_id: int, queue: Queue):
+ from sql_writer import SQLWriter
+ log = logging.getLogger(f"WriteTask-{task_id}")
+ writer = SQLWriter(get_connection)
+ lines = None
+ try:
+ while True:
+ try:
+ # get as many as possible
+ lines = queue.get_many(block=False, max_messages_to_get=MAX_BATCH_SIZE)
+ writer.process_lines(lines)
+ except Empty:
+ time.sleep(0.01)
+ except KeyboardInterrupt:
+ pass
+ except BaseException as e:
+ log.debug(f"lines={lines}")
+ raise e
+
+
+# ANCHOR_END: write
+
+def set_global_config():
+ argc = len(sys.argv)
+ if argc > 1:
+ global READ_TASK_COUNT
+ READ_TASK_COUNT = int(sys.argv[1])
+ if argc > 2:
+ global WRITE_TASK_COUNT
+ WRITE_TASK_COUNT = int(sys.argv[2])
+ if argc > 3:
+ global TABLE_COUNT
+ TABLE_COUNT = int(sys.argv[3])
+ if argc > 4:
+ global QUEUE_SIZE
+ QUEUE_SIZE = int(sys.argv[4])
+ if argc > 5:
+ global MAX_BATCH_SIZE
+ MAX_BATCH_SIZE = int(sys.argv[5])
+
+
+# ANCHOR: monitor
+def run_monitor_process():
+ import taos
+ log = logging.getLogger("DataBaseMonitor")
+ conn = get_connection()
+ conn.execute("DROP DATABASE IF EXISTS test")
+ conn.execute("CREATE DATABASE test")
+ conn.execute("CREATE STABLE test.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) "
+ "TAGS (location BINARY(64), groupId INT)")
+
+ def get_count():
+ res = conn.query("SELECT count(*) FROM test.meters")
+ rows = res.fetch_all()
+ return rows[0][0] if rows else 0
+
+ last_count = 0
+ while True:
+ time.sleep(10)
+ count = get_count()
+ log.info(f"count={count} speed={(count - last_count) / 10}")
+ last_count = count
+
+
+# ANCHOR_END: monitor
+# ANCHOR: main
+def main():
+ set_global_config()
+ logging.info(f"READ_TASK_COUNT={READ_TASK_COUNT}, WRITE_TASK_COUNT={WRITE_TASK_COUNT}, "
+ f"TABLE_COUNT={TABLE_COUNT}, QUEUE_SIZE={QUEUE_SIZE}, MAX_BATCH_SIZE={MAX_BATCH_SIZE}")
+
+ monitor_process = Process(target=run_monitor_process)
+ monitor_process.start()
+    time.sleep(3)  # wait for the database to be ready
+
+ task_queues: List[Queue] = []
+ # create task queues
+ for i in range(WRITE_TASK_COUNT):
+ queue = Queue(max_size_bytes=QUEUE_SIZE)
+ task_queues.append(queue)
+ # create write processes
+ for i in range(WRITE_TASK_COUNT):
+ p = Process(target=run_write_task, args=(i, task_queues[i]))
+ p.start()
+ logging.debug(f"WriteTask-{i} started with pid {p.pid}")
+ write_processes.append(p)
+ # create read processes
+ for i in range(READ_TASK_COUNT):
+ p = Process(target=run_read_task, args=(i, task_queues))
+ p.start()
+ logging.debug(f"ReadTask-{i} started with pid {p.pid}")
+ read_processes.append(p)
+
+ try:
+ monitor_process.join()
+ except KeyboardInterrupt:
+ monitor_process.terminate()
+ [p.terminate() for p in read_processes]
+ [p.terminate() for p in write_processes]
+ [q.close() for q in task_queues]
+
+
+if __name__ == '__main__':
+ main()
+# ANCHOR_END: main
diff --git a/docs/examples/python/mockdatasource.py b/docs/examples/python/mockdatasource.py
new file mode 100644
index 0000000000000000000000000000000000000000..852860aec0adc8f9b043c9dcd5deb0bf00239201
--- /dev/null
+++ b/docs/examples/python/mockdatasource.py
@@ -0,0 +1,49 @@
+import time
+
+
+class MockDataSource:
+ samples = [
+ "8.8,119,0.32,LosAngeles,0",
+ "10.7,116,0.34,SanDiego,1",
+ "9.9,111,0.33,Hollywood,2",
+ "8.9,113,0.329,Compton,3",
+ "9.4,118,0.141,San Francisco,4"
+ ]
+
+ def __init__(self, tb_name_prefix, table_count):
+ self.table_name_prefix = tb_name_prefix + "_"
+ self.table_count = table_count
+ self.max_rows = 10000000
+ self.current_ts = round(time.time() * 1000) - self.max_rows * 100
+ # [(tableId, tableName, values),]
+ self.data = self._init_data()
+
+ def _init_data(self):
+ lines = self.samples * (self.table_count // 5 + 1)
+ data = []
+ for i in range(self.table_count):
+ table_name = self.table_name_prefix + str(i)
+            data.append((i, table_name, lines[i]))  # (tableId, tableName, values)
+ return data
+
+ def __iter__(self):
+ self.row = 0
+ return self
+
+ def __next__(self):
+ """
+ next 1000 rows for each table.
+        return: a list of (tableId, rows) tuples
+ """
+ # generate 1000 timestamps
+ ts = []
+ for _ in range(1000):
+ self.current_ts += 100
+ ts.append(str(self.current_ts))
+ # add timestamp to each row
+ # [(tableId, ["tableName,ts,current,voltage,phase,location,groupId"])]
+ result = []
+ for table_id, table_name, values in self.data:
+ rows = [table_name + ',' + t + ',' + values for t in ts]
+ result.append((table_id, rows))
+ return result
diff --git a/docs/examples/python/sql_writer.py b/docs/examples/python/sql_writer.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb04f85c239af7c4801e2a5ef0483a88b21245ef
--- /dev/null
+++ b/docs/examples/python/sql_writer.py
@@ -0,0 +1,90 @@
+import logging
+import taos
+
+
+class SQLWriter:
+ log = logging.getLogger("SQLWriter")
+
+ def __init__(self, get_connection_func):
+ self._tb_values = {}
+ self._tb_tags = {}
+ self._conn = get_connection_func()
+ self._max_sql_length = self.get_max_sql_length()
+ self._conn.execute("USE test")
+
+ def get_max_sql_length(self):
+ rows = self._conn.query("SHOW variables").fetch_all()
+ for r in rows:
+ name = r[0]
+ if name == "maxSQLLength":
+ return int(r[1])
+ return 1024 * 1024
+
+    def process_lines(self, lines: list):
+        """
+        :param lines: list of strings, each in the format: tbName,ts,current,voltage,phase,location,groupId
+ """
+ for line in lines:
+ ps = line.split(",")
+ table_name = ps[0]
+ value = '(' + ",".join(ps[1:-2]) + ') '
+ if table_name in self._tb_values:
+ self._tb_values[table_name] += value
+ else:
+ self._tb_values[table_name] = value
+
+ if table_name not in self._tb_tags:
+ location = ps[-2]
+ group_id = ps[-1]
+ tag_value = f"('{location}',{group_id})"
+ self._tb_tags[table_name] = tag_value
+ self.flush()
+
+ def flush(self):
+ """
+ Assemble INSERT statement and execute it.
+ When the sql length grows close to MAX_SQL_LENGTH, the sql will be executed immediately, and a new INSERT statement will be created.
+        In case of a "Table does not exist" exception, the tables in the sql will be created and the sql will be re-executed.
+ """
+ sql = "INSERT INTO "
+ sql_len = len(sql)
+ buf = []
+ for tb_name, values in self._tb_values.items():
+ q = tb_name + " VALUES " + values
+ if sql_len + len(q) >= self._max_sql_length:
+ sql += " ".join(buf)
+ self.execute_sql(sql)
+ sql = "INSERT INTO "
+ sql_len = len(sql)
+ buf = []
+ buf.append(q)
+ sql_len += len(q)
+ sql += " ".join(buf)
+ self.execute_sql(sql)
+ self._tb_values.clear()
+
+ def execute_sql(self, sql):
+ try:
+ self._conn.execute(sql)
+ except taos.Error as e:
+ error_code = e.errno & 0xffff
+            # table does not exist: create the missing tables and retry the statement
+            if error_code == 0x362 or error_code == 0x218:
+                self.create_tables()
+                self.execute_sql(sql)
+ else:
+ self.log.error("Execute SQL: %s", sql)
+ raise e
+ except BaseException as baseException:
+ self.log.error("Execute SQL: %s", sql)
+ raise baseException
+
+ def create_tables(self):
+ sql = "CREATE TABLE "
+ for tb in self._tb_values.keys():
+ tag_values = self._tb_tags[tb]
+ sql += "IF NOT EXISTS " + tb + " USING meters TAGS " + tag_values + " "
+ try:
+ self._conn.execute(sql)
+ except BaseException as e:
+ self.log.error("Execute SQL: %s", sql)
+ raise e
diff --git a/docs/examples/python/stmt_writer.py b/docs/examples/python/stmt_writer.py
new file mode 100644
index 0000000000000000000000000000000000000000..60846b5a6491491655905008b58e6411818720fb
--- /dev/null
+++ b/docs/examples/python/stmt_writer.py
@@ -0,0 +1,2 @@
+class StmtWriter:
+ pass
diff --git a/docs/zh/02-intro.md b/docs/zh/02-intro.md
index 673c2e96b65814fc1cd572d54f948793ed6fa521..191e1cbcc2921b95f7e312cae2a6d84deaff65fb 100644
--- a/docs/zh/02-intro.md
+++ b/docs/zh/02-intro.md
@@ -52,7 +52,7 @@ TDengine的主要功能如下:
采用 TDengine,可将典型的物联网、车联网、工业互联网大数据平台的总拥有成本大幅降低。表现在几个方面:
1. 由于其超强性能,它能将系统需要的计算资源和存储资源大幅降低
-2. 因为采用 SQL 接口,能与众多第三放软件无缝集成,学习迁移成本大幅下降
+2. 因为采用 SQL 接口,能与众多第三方软件无缝集成,学习迁移成本大幅下降
3. 因为其 All In One 的特性,系统复杂度降低,能降研发成本
4. 因为运维维护简单,运营维护成本能大幅降低
diff --git a/docs/zh/04-concept/index.md b/docs/zh/04-concept/index.md
index 8e97d4a2f43537c1229c8e8ea092ddfc1257dde7..0a0e4a3a2f251a5316f95c5dbb071215d0af35db 100644
--- a/docs/zh/04-concept/index.md
+++ b/docs/zh/04-concept/index.md
@@ -148,7 +148,7 @@ TDengine 建议用数据采集点的名字(如上表中的 D1001)来做表
3. 子表一定属于一张超级表,但普通表不属于任何超级表
4. 普通表无法转为子表,子表也无法转为普通表。
-超级表与与基于超级表建立的子表之间的关系表现在:
+超级表与基于超级表建立的子表之间的关系表现在:
1. 一张超级表包含有多张子表,这些子表具有相同的采集量 schema,但带有不同的标签值。
2. 不能通过子表调整数据或标签的模式,对于超级表的数据模式修改立即对所有的子表生效。
diff --git a/docs/zh/07-develop/03-insert-data/05-high-volume.md b/docs/zh/07-develop/03-insert-data/05-high-volume.md
new file mode 100644
index 0000000000000000000000000000000000000000..6c60fd6e24c590bce90f28b8a0cc7f308f99f801
--- /dev/null
+++ b/docs/zh/07-develop/03-insert-data/05-high-volume.md
@@ -0,0 +1,440 @@
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
+
+# Efficient Writing
+
+This section describes how to write data into TDengine efficiently.
+
+## Principles of Efficient Writing {#principle}
+
+### From the Perspective of the Client Program {#application-view}
+
+From the perspective of the client program, efficient writing needs to take the following factors into account:
+
+1. The amount of data written per request. Generally, the larger each batch is, the more efficient writing becomes (although the advantage disappears beyond a certain threshold). When writing to TDengine with SQL, try to pack as much data as possible into one SQL statement. Currently, the maximum length of a single SQL statement supported by TDengine is 1,048,576 (1 MB) characters. It can be adjusted through the client parameter maxSQLLength (default value 65480).
+2. The number of concurrent connections. Generally, the more connections writing data at the same time, the more efficient writing becomes (although efficiency starts to drop beyond a certain threshold, depending on the processing capacity of the server).
+3. The distribution of the data across different tables (or subtables), i.e. the adjacency of the data being written. Generally, writing each batch to a single table (or subtable) is more efficient than writing one batch to many tables (or subtables).
+4. The write method. Generally:
+   - Parameter binding is more efficient than writing SQL, because it avoids SQL parsing (it does, however, increase the number of calls to the C interface, which also carries a performance cost for the connectors).
+   - Writing SQL without automatic table creation is more efficient than with automatic table creation, because the latter checks whether the table exists over and over again.
+   - Writing SQL is more efficient than schemaless writing, because schemaless writing creates tables automatically and supports dynamic schema changes.
+
+The client program should make full and appropriate use of these factors. In a single write operation, try to write data only to the same table (or subtable); set the batch size, after testing and tuning, to the value that best fits the processing capacity of the current system; and likewise set the number of concurrent connections, after testing and tuning, to the best value for the current system, so that the best write speed in the current environment is achieved. A sketch of how a larger batch for a single subtable can be assembled is shown below.
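+
+The following is a minimal, hedged sketch of that idea: it packs many rows of one subtable into a single INSERT statement. The subtable name d1001, the tag values, and the row values are placeholders; the table layout follows the meters example used by the sample programs later in this section, and the JDBC URL is read from the TDENGINE_JDBC_URL environment variable described in the execution steps:
+
+```java
+import java.sql.*;
+
+public class BatchInsertSketch {
+    public static void main(String[] args) throws SQLException {
+        // Assumption: TDENGINE_JDBC_URL points at a reachable TDengine, and the database "test"
+        // with the super table "meters" already exists (see the preparation sketch later in this section).
+        try (Connection conn = DriverManager.getConnection(System.getenv("TDENGINE_JDBC_URL"));
+             Statement stmt = conn.createStatement()) {
+            stmt.execute("USE test");
+            // Build ONE statement that carries many rows of the SAME subtable (d1001 is a placeholder name).
+            StringBuilder sql = new StringBuilder("INSERT INTO d1001 USING meters TAGS ('LosAngeles', 2) VALUES ");
+            long ts = System.currentTimeMillis();
+            for (int i = 0; i < 1000; i++) {       // keep the total length below maxSQLLength
+                sql.append('(').append(ts + i).append(",10.3,219,0.31) ");
+            }
+            stmt.executeUpdate(sql.toString());    // one round trip writes the whole batch
+        }
+    }
+}
+```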
+
+### From the Perspective of the Data Source {#datasource-view}
+
+Client programs usually need to read data from a data source before writing it to TDengine. From the perspective of the data source, a queue between the reading threads and the writing threads is needed in the following situations:
+
+1. There are multiple data sources, each single data source generates data much more slowly than a single thread can write, but the overall data volume is fairly large. Here the queue gathers the data from several data sources so that the amount of data written at a time becomes larger.
+2. A single data source generates data much faster than a single thread can write. Here the queue increases the write concurrency.
+3. The data of a single table is scattered across multiple data sources. Here the queue gathers the data of one table in advance, which improves the adjacency of the data during writes.
+
+If the data source of the writing application is Kafka and the writing application itself is a Kafka consumer, the features of Kafka can be used to achieve efficient writing. For example (see the sketch after this list):
+
+1. Write the data of one table to the same partition of the same topic, which improves data adjacency
+2. Aggregate data by subscribing to multiple topics
+3. Increase write concurrency by increasing the number of consumer threads
+4. Increase the maximum amount of data fetched each time to increase the maximum amount of data written at a time
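+
+Below is a hedged sketch of the consumer-side settings mentioned above; the bootstrap address, topic name, and consumer group id are placeholders rather than part of the sample project:
+
+```java
+import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+
+import java.time.Duration;
+import java.util.Arrays;
+import java.util.Properties;
+
+public class MeterConsumerSketch {
+    public static void main(String[] args) {
+        Properties props = new Properties();
+        props.put("bootstrap.servers", "localhost:9092");   // placeholder address
+        props.put("group.id", "meters-writers");            // more consumers in one group -> higher write concurrency
+        props.put("max.poll.records", "3000");              // fetch more records per poll to enlarge each write batch
+        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
+        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
+        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
+            consumer.subscribe(Arrays.asList("meters"));     // subscribe to one or more topics to aggregate data
+            while (true) {
+                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
+                // assemble the records of one poll into a single INSERT statement and write it to TDengine
+                System.out.println("polled " + records.count() + " records");
+            }
+        }
+    }
+}
+```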
+
+### From the Perspective of Server Configuration {#setting-view}
+
+From the perspective of server configuration, there are also several ways to optimize write performance.
+
+If the total number of tables is small (far less than the number of cores multiplied by 1000) and the CPU usage of the taosd process stays low no matter how the client program is tuned, it is very likely that the tables are not evenly distributed across vgroups. For example, if a database has 1000 tables in total and minTablesPerVnode is also set to 1000, all tables end up in a single vgroup. In this case, setting both minTablesPerVnode and tableIncStepPerVnode to 100 spreads the tables across 10 vgroups (assuming maxVgroupsPerDb is greater than or equal to 10).
+
+If the total number of tables is large (for example, more than 5 million), increasing maxVgroupsPerDb appropriately can also speed up table creation significantly. The default value of maxVgroupsPerDb is 0, which means it is automatically set to the number of CPU cores. If the number of tables is huge, it is also recommended to adjust maxTablesPerVnode so that the table-creation limit of a single vnode is not exceeded.
+
+For more tuning parameters, please refer to [Performance Optimization](../../operation/optimize) and [Configuration Reference](../../reference/config).
+
+## Sample Programs for Efficient Writing {#sample-code}
+
+### Scenario Design {#scenario}
+
+The sample programs below demonstrate how to write data efficiently, with the scenario designed as follows:
+
+- The TDengine client program continuously reads data from other data sources; in the sample programs, a mock data source simulates this
+- The speed of a single connection writing to TDengine cannot keep up with the speed of reading data, so the client program starts multiple threads, each of which establishes its own connection to TDengine and owns a dedicated, fixed-size message queue
+- The client program hashes received data to different threads by table name (or subtable name), i.e. writes it to the message queue of that thread, so that the data belonging to one table (or subtable) is always processed by one fixed thread
+- Each worker thread writes a batch to TDengine after draining its message queue or after reading a predefined amount of data, and then continues to process the data received afterwards
+
+![Thread model of the efficient writing example](highvolume.webp)
+
+### Sample Code {#code}
+
+This part shows sample code for the scenario above. The principle of efficient writing is the same for other scenarios, but the code needs to be adjusted accordingly.
+
+The sample code assumes that the source data belongs to different subtables of one super table (meters). The program creates this super table in the test database before it starts writing data. Subtables are created automatically by the application according to the received data. If the real scenario involves several super tables, only the code for automatic table creation in the write task needs to be modified.
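+
+A minimal sketch of that preparation step might look as follows; the statements mirror what the monitoring component of the sample programs executes, and the JDBC URL is read from the TDENGINE_JDBC_URL environment variable described in the execution steps below:
+
+```java
+import java.sql.*;
+
+public class PrepareDatabaseSketch {
+    public static void main(String[] args) throws SQLException {
+        try (Connection conn = DriverManager.getConnection(System.getenv("TDENGINE_JDBC_URL"));
+             Statement stmt = conn.createStatement()) {
+            stmt.execute("DROP DATABASE IF EXISTS test");
+            stmt.execute("CREATE DATABASE test");
+            stmt.execute("CREATE STABLE test.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT)"
+                    + " TAGS (location BINARY(64), groupId INT)");
+        }
+    }
+}
+```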
+
+
+
+
+**Program Listing**
+
+| Class            | Description                                                                                          |
+| ---------------- | ----------------------------------------------------------------------------------------------------- |
+| FastWriteExample | Main program                                                                                            |
+| ReadTask         | Reads data from the mock source, hashes the table name to get the queue index, and writes to the corresponding queue |
+| WriteTask        | Takes data from a queue, assembles a batch, and writes it to TDengine                                  |
+| MockDataSource   | Generates mock data for a certain number of meters subtables                                           |
+| SQLWriter        | WriteTask relies on this class for SQL assembly, automatic table creation, SQL writing, and SQL length checking |
+| StmtWriter       | Batch writing via parameter binding (not yet finished)                                                 |
+| DataBaseMonitor  | Counts the write speed and prints the current write speed to the console every 10 seconds              |
+
+
+The complete code and more detailed descriptions of each class follow.
+
+
+FastWriteExample
+The main program is responsible for:
+
+1. Creating the message queues
+2. Starting the write threads
+3. Starting the read threads
+4. Printing the write speed every 10 seconds
+
+The main program exposes 4 parameters by default; they can be adjusted on every start and are used for testing and tuning:
+
+1. The number of read threads. Default is 1.
+2. The number of write threads. Default is 3.
+3. The total number of simulated tables. Default is 1000. The tables are divided evenly among the read threads. If the total number of tables is large, table creation takes longer and the write speed reported at the beginning may be lower.
+4. The maximum number of records written per batch. Default is 3000.
+
+The queue capacity (taskQueueCapacity) is also a performance-related parameter and can be adjusted by modifying the program. Generally, the larger the queue capacity, the lower the probability that a put is blocked and the higher the queue throughput, but the memory footprint also grows. The default value of the sample program is already large enough.
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/FastWriteExample.java}}
+```
+
+
+
+
+ReadTask
+
+The read task is responsible for reading data from the data source. Each read task is associated with one mock data source, and each mock data source generates data for a certain number of tables. Different mock data sources generate data for different tables.
+
+The read task writes to the message queue in a blocking way; that is, once the queue is full, the put operation blocks.
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/ReadTask.java}}
+```
+
+
+
+
+WriteTask
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/WriteTask.java}}
+```
+
+
+
+
+
+MockDataSource
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/MockDataSource.java}}
+```
+
+
+
+
+
+SQLWriter
+
+The SQLWriter class encapsulates the logic of assembling SQL and writing data. Note that none of the tables are created in advance; instead, when a "table does not exist" exception is caught, tables are created in batch with the super table as the template, and the INSERT statement is then retried. For other exceptions, the SQL statement being executed at the time is simply logged; you can also log more clues for troubleshooting and fault recovery.
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java}}
+```
+
+
+
+
+
+DataBaseMonitor
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java}}
+```
+
+
+
+**Execution Steps**
+
+
+Running the Java sample program
+
+Before running the program, configure the environment variable `TDENGINE_JDBC_URL`. If TDengine Server is deployed on the local machine and the user name, password, and port are all default values, it can be configured as:
+
+```
+TDENGINE_JDBC_URL="jdbc:TAOS://localhost:6030?user=root&password=taosdata"
+```
+
+**Running the sample program in a local IDE**
+
+1. Clone the TDengine repository
+ ```
+ git clone git@github.com:taosdata/TDengine.git --depth 1
+ ```
+2. Open the directory `docs/examples/java` in the IDE.
+3. Configure the environment variable `TDENGINE_JDBC_URL` in the IDE. If a global environment variable `TDENGINE_JDBC_URL` has already been configured, this step can be skipped.
+4. Run the class `com.taos.example.highvolume.FastWriteExample`.
+
+**Running the sample program on a remote server**
+
+To run the sample program on a server, follow the steps below:
+
+1. Package the sample code. Run the following in the directory TDengine/docs/examples/java:
+ ```
+ mvn package
+ ```
+2. Create an examples directory on the remote server:
+ ```
+ mkdir -p examples/java
+ ```
+3. Copy the dependencies to the specified directory on the server:
+   - Copy the dependency packages (this only needs to be done once)
+ ```
+   scp -r .\target\lib <user>@<host>:~/examples/java
+ ```
+   - Copy the jar file of this program (it must be copied again every time the code is updated)
+ ```
+   scp -r .\target\javaexample-1.0.jar <user>@<host>:~/examples/java
+ ```
+4. Configure the environment variable.
+   Edit `~/.bash_profile` or `~/.bashrc` and add the following, for example:
+
+ ```
+ export TDENGINE_JDBC_URL="jdbc:TAOS://localhost:6030?user=root&password=taosdata"
+ ```
+
+   The above uses the default JDBC URL for a locally deployed TDengine Server. You need to change it according to your actual environment.
+
+5. Start the sample program with the java command. Command template:
+
+ ```
+ java -classpath lib/*:javaexample-1.0.jar com.taos.example.highvolume.FastWriteExample
+ ```
+
+6. Stop the test program. The test program does not stop automatically; once a stable write speed has been obtained for the current configuration, press CTRL + C to stop it.
+   Below is the log output of an actual run, on a machine with a 16-core CPU, 64 GB RAM, and SSD storage.
+
+ ```
+ root@vm85$ java -classpath lib/*:javaexample-1.0.jar com.taos.example.highvolume.FastWriteExample 2 12
+ 18:56:35.896 [main] INFO c.t.e.highvolume.FastWriteExample - readTaskCount=2, writeTaskCount=12 tableCount=1000 maxBatchSize=3000
+ 18:56:36.011 [WriteThread-0] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.015 [WriteThread-0] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.021 [WriteThread-1] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.022 [WriteThread-1] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.031 [WriteThread-2] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.032 [WriteThread-2] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.041 [WriteThread-3] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.042 [WriteThread-3] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.093 [WriteThread-4] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.094 [WriteThread-4] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.099 [WriteThread-5] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.100 [WriteThread-5] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.100 [WriteThread-6] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.101 [WriteThread-6] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.103 [WriteThread-7] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.104 [WriteThread-7] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.105 [WriteThread-8] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.107 [WriteThread-8] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.108 [WriteThread-9] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.109 [WriteThread-9] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.156 [WriteThread-10] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.157 [WriteThread-11] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.158 [WriteThread-10] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.158 [ReadThread-0] INFO com.taos.example.highvolume.ReadTask - started
+ 18:56:36.158 [ReadThread-1] INFO com.taos.example.highvolume.ReadTask - started
+ 18:56:36.158 [WriteThread-11] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:46.369 [main] INFO c.t.e.highvolume.FastWriteExample - count=18554448 speed=1855444
+ 18:56:56.946 [main] INFO c.t.e.highvolume.FastWriteExample - count=39059660 speed=2050521
+ 18:57:07.322 [main] INFO c.t.e.highvolume.FastWriteExample - count=59403604 speed=2034394
+ 18:57:18.032 [main] INFO c.t.e.highvolume.FastWriteExample - count=80262938 speed=2085933
+ 18:57:28.432 [main] INFO c.t.e.highvolume.FastWriteExample - count=101139906 speed=2087696
+ 18:57:38.921 [main] INFO c.t.e.highvolume.FastWriteExample - count=121807202 speed=2066729
+ 18:57:49.375 [main] INFO c.t.e.highvolume.FastWriteExample - count=142952417 speed=2114521
+ 18:58:00.689 [main] INFO c.t.e.highvolume.FastWriteExample - count=163650306 speed=2069788
+ 18:58:11.646 [main] INFO c.t.e.highvolume.FastWriteExample - count=185019808 speed=2136950
+ ```
+
+
+
+
+
+
+**Program Listing**
+
+The Python sample program uses a multi-process architecture with cross-process message queues.
+
+| Function or Class        | Description                                                                                             |
+| ------------------------ | --------------------------------------------------------------------------------------------------------- |
+| main function            | Program entry point; creates the child processes and the message queues                                   |
+| run_monitor_process      | Creates the database and the super table, counts the write speed, and prints it to the console periodically |
+| run_read_task function   | Main logic of the read processes; reads data from other data systems and distributes it to the queues assigned to it |
+| MockDataSource class     | Mock data source; implements the iterator interface and returns the next 1000 rows of each table in batches |
+| run_write_task function  | Main logic of the write processes; takes as much data as possible from the queue each time and writes it in one batch |
+| SQLWriter class          | SQL writing and automatic table creation                                                                   |
+| StmtWriter class         | Batch writing via parameter binding (not yet finished)                                                     |
+
+
+
+main function
+
+The main function is responsible for creating the message queues and starting the child processes, of which there are 3 kinds:
+
+1. 1 monitoring process, responsible for database initialization and counting the write speed
+2. n read processes, responsible for reading data from other data systems
+3. m write processes, responsible for writing to the database
+
+The main function can accept 5 startup parameters, in this order:
+
+1. The number of read tasks (processes). Default is 1.
+2. The number of write tasks (processes). Default is 1.
+3. The total number of simulated tables. Default is 1000.
+4. The queue size (in bytes). Default is 1000000.
+5. The maximum number of records written per batch. Default is 3000.
+
+```python
+{{#include docs/examples/python/fast_write_example.py:main}}
+```
+
+
+
+
+run_monitor_process
+
+The monitoring process is responsible for initializing the database and monitoring the current write speed.
+
+```python
+{{#include docs/examples/python/fast_write_example.py:monitor}}
+```
+
+
+
+
+
+run_read_task function
+
+The read processes are responsible for reading data from other data systems and distributing it to the queues assigned to them.
+
+```python
+{{#include docs/examples/python/fast_write_example.py:read}}
+```
+
+
+
+
+
+MockDataSource
+
+Below is the implementation of the mock data source. We assume that every piece of data generated by the data source carries the target table name. In practice you may need your own rules to determine the target table name.
+
+```python
+{{#include docs/examples/python/mockdatasource.py}}
+```
+
+
+
+
+run_write_task function
+
+The write processes take as much data as possible from the queue each time and write it in one batch.
+
+```python
+{{#include docs/examples/python/fast_write_example.py:write}}
+```
+
+
+
+
+
+The SQLWriter class encapsulates the logic of assembling SQL and writing data. None of the tables are created in advance; instead, when a "table does not exist" error occurs, tables are created in batch with the super table as the template, and the INSERT statement is then retried. For other errors, the SQL being executed at the time is logged, for troubleshooting and fault recovery. This class also checks whether the SQL is about to exceed the maximum length limit; when the SQL gets close to maxSQLLength, it is executed immediately. To reduce the number of SQL executions, it is recommended to increase maxSQLLength appropriately.
+
+SQLWriter
+
+```python
+{{#include docs/examples/python/sql_writer.py}}
+```
+
+
+
+**Execution Steps**
+
+
+
+Running the Python sample program
+
+1. Prerequisites
+
+   - The TDengine client driver has been installed
+   - Python3 has been installed, version >= 3.8 recommended
+   - taospy has been installed
+
+2. Install faster-fifo to replace the multiprocessing.Queue built into Python
+
+ ```
+ pip3 install faster-fifo
+ ```
+
+3. Click the "View Source" links above to copy the three files `fast_write_example.py`, `sql_writer.py`, and `mockdatasource.py`.
+
+4. Run the sample program
+
+ ```
+ python3 fast_write_example.py
+ ```
+
+   Below is the output of an actual run, on a machine with a 16-core CPU, 64 GB RAM, and SSD storage.
+
+ ```
+ root@vm85$ python3 fast_write_example.py 8 8
+ 2022-07-14 19:13:45,869 [root] - READ_TASK_COUNT=8, WRITE_TASK_COUNT=8, TABLE_COUNT=1000, QUEUE_SIZE=1000000, MAX_BATCH_SIZE=3000
+ 2022-07-14 19:13:48,882 [root] - WriteTask-0 started with pid 718347
+ 2022-07-14 19:13:48,883 [root] - WriteTask-1 started with pid 718348
+ 2022-07-14 19:13:48,884 [root] - WriteTask-2 started with pid 718349
+ 2022-07-14 19:13:48,884 [root] - WriteTask-3 started with pid 718350
+ 2022-07-14 19:13:48,885 [root] - WriteTask-4 started with pid 718351
+ 2022-07-14 19:13:48,885 [root] - WriteTask-5 started with pid 718352
+ 2022-07-14 19:13:48,886 [root] - WriteTask-6 started with pid 718353
+ 2022-07-14 19:13:48,886 [root] - WriteTask-7 started with pid 718354
+ 2022-07-14 19:13:48,887 [root] - ReadTask-0 started with pid 718355
+ 2022-07-14 19:13:48,888 [root] - ReadTask-1 started with pid 718356
+ 2022-07-14 19:13:48,889 [root] - ReadTask-2 started with pid 718357
+ 2022-07-14 19:13:48,889 [root] - ReadTask-3 started with pid 718358
+ 2022-07-14 19:13:48,890 [root] - ReadTask-4 started with pid 718359
+ 2022-07-14 19:13:48,891 [root] - ReadTask-5 started with pid 718361
+ 2022-07-14 19:13:48,892 [root] - ReadTask-6 started with pid 718364
+ 2022-07-14 19:13:48,893 [root] - ReadTask-7 started with pid 718365
+ 2022-07-14 19:13:56,042 [DataBaseMonitor] - count=6676310 speed=667631.0
+ 2022-07-14 19:14:06,196 [DataBaseMonitor] - count=20004310 speed=1332800.0
+ 2022-07-14 19:14:16,366 [DataBaseMonitor] - count=32290310 speed=1228600.0
+ 2022-07-14 19:14:26,527 [DataBaseMonitor] - count=44438310 speed=1214800.0
+ 2022-07-14 19:14:36,673 [DataBaseMonitor] - count=56608310 speed=1217000.0
+ 2022-07-14 19:14:46,834 [DataBaseMonitor] - count=68757310 speed=1214900.0
+ 2022-07-14 19:14:57,280 [DataBaseMonitor] - count=80992310 speed=1223500.0
+ 2022-07-14 19:15:07,689 [DataBaseMonitor] - count=93805310 speed=1281300.0
+ 2022-07-14 19:15:18,020 [DataBaseMonitor] - count=106111310 speed=1230600.0
+ 2022-07-14 19:15:28,356 [DataBaseMonitor] - count=118394310 speed=1228300.0
+ 2022-07-14 19:15:38,690 [DataBaseMonitor] - count=130742310 speed=1234800.0
+ 2022-07-14 19:15:49,000 [DataBaseMonitor] - count=143051310 speed=1230900.0
+ 2022-07-14 19:15:59,323 [DataBaseMonitor] - count=155276310 speed=1222500.0
+ 2022-07-14 19:16:09,649 [DataBaseMonitor] - count=167603310 speed=1232700.0
+ 2022-07-14 19:16:19,995 [DataBaseMonitor] - count=179976310 speed=1237300.0
+ ```
+
+
+
+:::note
+When connecting to TDengine from multiple processes with the Python connector, there is a limitation: connections must not be created in the parent process; all connections can only be created in the child processes.
+If a connection is created in the parent process, any connection created in a child process will block forever. This is a known issue.
+
+:::
+
+
+
+
+
diff --git a/docs/zh/07-develop/03-insert-data/highvolume.webp b/docs/zh/07-develop/03-insert-data/highvolume.webp
new file mode 100644
index 0000000000000000000000000000000000000000..46dfc74ae3b0043c591ff930c62251da49cae7ad
Binary files /dev/null and b/docs/zh/07-develop/03-insert-data/highvolume.webp differ
diff --git a/docs/zh/12-taos-sql/02-database.md b/docs/zh/12-taos-sql/02-database.md
index 566fec324148fede8d897869656b83e657569f59..e3a0aa7c87461fd1621a38093871a1542e3dbf98 100644
--- a/docs/zh/12-taos-sql/02-database.md
+++ b/docs/zh/12-taos-sql/02-database.md
@@ -32,7 +32,6 @@ CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [DAYS days] [UPDATE 1];
- cacheLast: [详细说明](/reference/config/#cachelast)
- replica: [详细说明](/reference/config/#replica)
- quorum: [详细说明](/reference/config/#quorum)
- - maxVgroupsPerDb: [详细说明](/reference/config/#maxvgroupsperdb)
- comp: [详细说明](/reference/config/#comp)
- precision: [详细说明](/reference/config/#precision)
6. 请注意上面列出的所有参数都可以配置在配置文件 `taosd.cfg` 中作为创建数据库时使用的默认配置, `create database` 的参数中明确指定的会覆盖配置文件中的设置。
diff --git a/docs/zh/14-reference/03-connector/java.mdx b/docs/zh/14-reference/03-connector/java.mdx
index ddab9e5f24c64e51e82cad6e299f3ea0d741b349..f7bd540088f28528f36e63e13b2c4917f497c3bc 100644
--- a/docs/zh/14-reference/03-connector/java.mdx
+++ b/docs/zh/14-reference/03-connector/java.mdx
@@ -93,7 +93,7 @@ Maven 项目中,在 pom.xml 中添加以下依赖:
可以通过下载 TDengine 的源码,自己编译最新版本的 Java connector
```shell
-git clone https://github.com/taosdata/taos-connector-jdbc.git
+git clone https://github.com/taosdata/taos-connector-jdbc.git --branch 2.0
cd taos-connector-jdbc
mvn clean install -Dmaven.test.skip=true
```
@@ -201,6 +201,10 @@ url 中的配置参数如下:
- batchfetch: true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:false。逐行拉取结果集使用 HTTP 方式进行数据传输。从 taos-jdbcdriver-2.0.38 和 TDengine 2.4.0.12 版本开始,JDBC REST 连接增加批量拉取数据功能。taos-jdbcdriver 与 TDengine 之间通过 WebSocket 连接进行数据传输。相较于 HTTP,WebSocket 可以使 JDBC REST 连接支持大数据量查询,并提升查询性能。
- charset: 当开启批量拉取数据时,指定解析字符串数据的字符集。
- batchErrorIgnore:true:在执行 Statement 的 executeBatch 时,如果中间有一条 SQL 执行失败,继续执行下面的 SQL 了。false:不再执行失败 SQL 后的任何语句。默认值为:false。
+- httpConnectTimeout: 连接超时时间,单位 ms, 默认值为 5000。
+- httpSocketTimeout: socket 超时时间,单位 ms,默认值为 5000。仅在 batchfetch 设置为 false 时生效。
+- messageWaitTimeout: 消息超时时间, 单位 ms, 默认值为 3000。 仅在 batchfetch 设置为 true 时生效。
+- useSSL: 连接中是否使用 SSL。
**注意**:部分配置项(比如:locale、timezone)在 REST 连接中不生效。
@@ -264,7 +268,11 @@ properties 中的配置参数如下:
- TSDBDriver.PROPERTY_KEY_CHARSET:客户端使用的字符集,默认值为系统字符集。
- TSDBDriver.PROPERTY_KEY_LOCALE:仅在使用 JDBC 原生连接时生效。 客户端语言环境,默认值系统当前 locale。
- TSDBDriver.PROPERTY_KEY_TIME_ZONE:仅在使用 JDBC 原生连接时生效。 客户端使用的时区,默认值为系统当前时区。
-- 此外对 JDBC 原生连接,通过指定 URL 和 Properties 还可以指定其他参数,比如日志级别、SQL 长度等。更多详细配置请参考[客户端配置](/reference/config/#仅客户端适用)。
+- TSDBDriver.HTTP_CONNECT_TIMEOUT: 连接超时时间,单位 ms, 默认值为 5000。仅在 REST 连接时生效。
+- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket 超时时间,单位 ms,默认值为 5000。仅在 REST 连接且 batchfetch 设置为 false 时生效。
+- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: 消息超时时间, 单位 ms, 默认值为 3000。 仅在 REST 连接且 batchfetch 设置为 true 时生效。
+- TSDBDriver.PROPERTY_KEY_USE_SSL: 连接中是否使用 SSL。仅在 REST 连接时生效。
+ 此外对 JDBC 原生连接,通过指定 URL 和 Properties 还可以指定其他参数,比如日志级别、SQL 长度等。更多详细配置请参考[客户端配置](/reference/config/#仅客户端适用)。
### 配置参数的优先级
@@ -368,7 +376,7 @@ public class ParameterBindingDemo {
private static final String host = "127.0.0.1";
private static final Random random = new Random(System.currentTimeMillis());
- private static final int BINARY_COLUMN_SIZE = 20;
+ private static final int BINARY_COLUMN_SIZE = 30;
private static final String[] schemaList = {
"create table stable1(ts timestamp, f1 tinyint, f2 smallint, f3 int, f4 bigint) tags(t1 tinyint, t2 smallint, t3 int, t4 bigint)",
"create table stable2(ts timestamp, f1 float, f2 double) tags(t1 float, t2 double)",
@@ -809,6 +817,7 @@ Query OK, 1 row(s) in set (0.000141s)
| taos-jdbcdriver 版本 | 主要变化 |
| :------------------: | :----------------------------: |
+| 2.0.39 - 2.0.40 | 增加 REST 连接/请求 超时设置 |
| 2.0.38 | JDBC REST 连接增加批量拉取功能 |
| 2.0.37 | 增加对 json tag 支持 |
| 2.0.36 | 增加对 schemaless 写入支持 |
diff --git a/docs/zh/20-third-party/10-hive-mq-broker.md b/docs/zh/20-third-party/10-hive-mq-broker.md
index f75ed793d6272ae27f92676e2096ef455f638aa6..1944b97cb05103d888bebba48998b163135dc50c 100644
--- a/docs/zh/20-third-party/10-hive-mq-broker.md
+++ b/docs/zh/20-third-party/10-hive-mq-broker.md
@@ -3,4 +3,4 @@ sidebar_label: HiveMQ Broker
title: HiveMQ Broker 写入
---
-[HiveMQ](https://www.hivemq.com/) 是一个提供免费个人版和企业版的 MQTT 代理,主要用于企业和新兴的机器到机器 M2M 通讯和内部传输,满足可伸缩性、易管理和安全特性。HiveMQ 提供了开源的插件开发包。可以通过 HiveMQ extension - TDengine 保存数据到 TDengine。详细使用方法请参考 [HiveMQ extension - TDengine 说明文档](https://github.com/huskar-t/hivemq-tdengine-extension/blob/b62a26ecc164a310104df57691691b237e091c89/README.md)。
+[HiveMQ](https://www.hivemq.com/) 是一个提供免费个人版和企业版的 MQTT 代理,主要用于企业和新兴的机器到机器 M2M 通讯和内部传输,满足可伸缩性、易管理和安全特性。HiveMQ 提供了开源的插件开发包。可以通过 HiveMQ extension - TDengine 保存数据到 TDengine。详细使用方法请参考 [HiveMQ extension - TDengine 说明文档](https://github.com/taosdata/hivemq-tdengine-extension/blob/master/README.md)。
diff --git a/docs/zh/21-tdinternal/03-taosd.md b/docs/zh/21-tdinternal/03-taosd.md
index 0cf0a1aaa222e82f7ca6cc4f0314aa5a50442924..9470311f94a3247bb51db2b2f0d1ad1876d04995 100644
--- a/docs/zh/21-tdinternal/03-taosd.md
+++ b/docs/zh/21-tdinternal/03-taosd.md
@@ -88,7 +88,7 @@ TSDB 中存储的元数据包含属于其所在的 vnode 中表的类型,schem
## Query 模块
-该模块负责整体系统的查询处理。客户端调用该该模块进行 SQL 语法解析,并将查询或写入请求发送到 vnode ,同时负责针对超级表的查询进行二阶段的聚合操作。在 vnode 端,该模块调用 TSDB 模块读取系统中存储的数据进行查询处理。query 模块还定义了系统能够支持的全部查询函数,查询函数的实现机制与查询框架无耦合,可以在不修改查询流程的情况下动态增加查询函数。详细的设计请参见《TDengine 2.0 查询模块设计》。
+该模块负责整体系统的查询处理。客户端调用该该模块进行 SQL 语法解析,并将查询或写入请求发送到 vnode ,同时负责针对超级表的查询进行二阶段的聚合操作。在 vnode 端,该模块调用 TSDB 模块读取系统中存储的数据进行查询处理。query 模块还定义了系统能够支持的全部查询函数,查询函数的实现机制与查询框架无耦合,可以在不修改查询流程的情况下动态增加查询函数。
## SYNC 模块
diff --git a/docs/zh/25-application/03-immigrate.md b/docs/zh/25-application/03-immigrate.md
index 9d8946bc4a69639c5327ac1ffb6c0539ddbd0e63..d1c9caea099b79494784aa1122e89d7b4d412464 100644
--- a/docs/zh/25-application/03-immigrate.md
+++ b/docs/zh/25-application/03-immigrate.md
@@ -367,10 +367,10 @@ WHERE ts>=1510560000 AND ts<=1515000009
### 存储资源估算
-假设产生数据并需要存储的传感器设备数量为 `n`,数据生成的频率为`t`条/秒,每条记录的长度为 `L` bytes,则每天产生的数据规模为 `n×t×L` bytes。假设压缩比为 C,则每日产生数据规模为 `(n×t×L)/C` bytes。存储资源预估为能够容纳 1.5 年的数据规模,生产环境下 TDengine 的压缩比 C 一般在 5 ~ 7 之间,同时为最后结果增加 20% 的冗余,可计算得到需要存储资源:
+假设产生数据并需要存储的传感器设备数量为 `n`,数据生成的频率为`t`条/秒,每条记录的长度为 `L` bytes,则每天产生的数据规模为 `86400×n×t×L` bytes。假设压缩比为 C,则每日产生数据规模为 `(86400×n×t×L)/C` bytes。存储资源预估为能够容纳 1.5 年的数据规模,生产环境下 TDengine 的压缩比 C 一般在 5 ~ 7 之间,同时为最后结果增加 20% 的冗余,可计算得到需要存储资源:
```matlab
-(n×t×L)×(365×1.5)×(1+20%)/C
+(86400×n×t×L)×(365×1.5)×(1+20%)/C
```
结合以上的计算公式,将参数带入计算公式,在不考虑标签信息的情况下,每年产生的原始数据规模是 11.8TB。需要注意的是,由于标签信息在 TDengine 中关联到每个时间线,并不是每条记录。所以需要记录的数据量规模相对于产生的数据有一定的降低,而这部分标签数据整体上可以忽略不记。假设压缩比为 5,则保留的数据规模最终为 2.56 TB。
diff --git a/docs/zh/27-train-faq/01-faq.md b/docs/zh/27-train-faq/01-faq.md
index e8a106d5d682948d97029cf36b7a47677a491804..bb92c69c3f99fff4043f872447eda8ea633f53e8 100644
--- a/docs/zh/27-train-faq/01-faq.md
+++ b/docs/zh/27-train-faq/01-faq.md
@@ -239,3 +239,25 @@ taosAdapter 从 TDengine 2.4.0.0 版本开始成为 TDengine 服务端软件的
OOM 是操作系统的保护机制,当操作系统内存(包括 SWAP )不足时,会杀掉某些进程,从而保证操作系统的稳定运行。通常内存不足主要是如下两个原因导致,一是剩余内存小于 vm.min_free_kbytes ;二是程序请求的内存大于剩余内存。还有一种情况是内存充足但程序占用了特殊的内存地址,也会触发 OOM 。
TDengine 会预先为每个 VNode 分配好内存,每个 Database 的 VNode 个数受 maxVgroupsPerDb 影响,每个 VNode 占用的内存大小受 Blocks 和 Cache 影响。要防止 OOM,需要在项目建设之初合理规划内存,并合理设置 SWAP ,除此之外查询过量的数据也有可能导致内存暴涨,这取决于具体的查询语句。TDengine 企业版对内存管理做了优化,采用了新的内存分配器,对稳定性有更高要求的用户可以考虑选择企业版。
+
+### 26. 为何批量写入数据时,时间戳使用 NOW 函数拼接会导致数据丢失?
+
+首先需要强调一个概念,TDengine 作为一个时序数据库(Time-Series Database),首个时间戳字段起到主键的作用,内存索引的构建、磁盘数据的存储与其密切相关,不能有重复的时间戳。
+
+NOW 函数(以及 NOW 关键字)返回客户端当前时间。当执行批量写入时,若首列时间戳给的值都是 NOW,在数据库默认毫秒的时间级别下是区分不开的(建库时可选择更高的时间精度),后续写入的重复时间戳将会丢失或更新,处理重复时间戳的具体逻辑由在 TDengine 中建库时的 Update 参数决定。
+
+### 27. 扩容集群后,DNode 状态为 Offline 怎么办?
+
+新的节点正常加入集群后,数据节点列表中会显示该节点处于 Ready 状态。若该节点状态为 Offline,可按照如下内容进行排查:
+
+1. 查看该节点 taosd 是否已启动、防火墙是否关闭;
+2. 确认新增节点的数据文件夹是否清空;
+3. 检查所有节点 /etc/hosts 域名解析是否完整、有效(需要有所有节点的解析,包括 arbitrator);
+4. 该节点 firstEP、fqdn 参数是否正确配置。
+
+### 28. 能提供 TDengine 的建模实例吗?
+
+在社区支持的过程中,能发现很多新手小伙伴在部署 TDengine 后不知道如何进一步体验,我们的建议是跑一跑官网文档的语句。文档内容较多,为了方便新手小伙伴快速上手,我们将官网文档的示例模型浓缩、汇总了一下,希望尽可能快的让大家了解 TDengine 建模方法:[建模入门](https://github.com/taosdata/tdengine-modeling-and-querying-101/blob/main/cases/001-electricity-meter-monitoring.zh-hans.md)
+
+同时也欢迎社区的用户们为仓库 [tdengine-modeling-and-querying-101](https://github.com/taosdata/tdengine-modeling-and-querying-101) 提交 PR,展现 TDengine 在各行各业的建模实例。
+
diff --git a/examples/C#/C#checker/C#checker.cs b/examples/C#/C#checker/C#checker.cs
index 7d0b6a50b673278ac6982a97de7eb31ce76761b6..f49fda88cdd8d298f2253bb8f47ccce58c3b0118 100644
--- a/examples/C#/C#checker/C#checker.cs
+++ b/examples/C#/C#checker/C#checker.cs
@@ -389,7 +389,7 @@ namespace TDengineDriver
static void ExitProgram()
{
- System.Environment.Exit(0);
+ System.Environment.Exit(1);
}
public void cleanup()
diff --git a/examples/C#/insertCn/lib/ResultSetUtils.cs b/examples/C#/insertCn/lib/ResultSetUtils.cs
new file mode 100644
index 0000000000000000000000000000000000000000..7d299411ee68067fca9b8cc5fc8c38e53510fa5d
--- /dev/null
+++ b/examples/C#/insertCn/lib/ResultSetUtils.cs
@@ -0,0 +1,43 @@
+using System;
+using TDengineDriver;
+using System.Runtime.InteropServices;
+using System.Text;
+using System.Collections.Generic;
+namespace Test.UtilsTools.ResultSet
+{
+ public class ResultSet
+ {
+        private List<TDengineMeta> resultMeta;
+        private List<Object> resultData;
+ // private bool isValidResult = false;
+ public ResultSet(IntPtr res)
+ {
+
+ resultMeta = UtilsTools.GetResField(res);
+ resultData = UtilsTools.GetResData(res);
+ }
+
+        public ResultSet(List<TDengineMeta> metas, List<Object> datas)
+ {
+ resultMeta = metas;
+ resultData = datas;
+ }
+
+        public List<Object> GetResultData()
+ {
+ return resultData;
+ }
+
+        public List<TDengineMeta> GetResultMeta()
+ {
+ return resultMeta;
+ }
+
+ public int GetFieldsNum()
+ {
+ return resultMeta.Count;
+ }
+ }
+
+
+}
diff --git a/examples/C#/insertCn/lib/Utils.cs b/examples/C#/insertCn/lib/Utils.cs
new file mode 100644
index 0000000000000000000000000000000000000000..6107ecab57869fbdd001988d54ba36930bb1fd0d
--- /dev/null
+++ b/examples/C#/insertCn/lib/Utils.cs
@@ -0,0 +1,418 @@
+using System;
+using TDengineDriver;
+using System.Runtime.InteropServices;
+using System.Text;
+using System.Collections.Generic;
+namespace Test.UtilsTools
+{
+ public class UtilsTools
+ {
+
+ static string ip = "127.0.0.1";
+ static string user = "root";
+ static string password = "taosdata";
+ static string db = "";
+ static short port = 0;
+ //get a tdengine connection
+ public static IntPtr TDConnection()
+ {
+ TDengine.Options((int)TDengineInitOption.TDDB_OPTION_CONFIGDIR, GetConfigPath());
+ TDengine.Options((int)TDengineInitOption.TDDB_OPTION_SHELL_ACTIVITY_TIMER, "60");
+ TDengine.Init();
+ IntPtr conn = TDengine.Connect(ip, user, password, db, port);
+ return conn;
+ }
+ //get taos.cfg file based on different os
+ public static string GetConfigPath()
+ {
+ string configDir = "" ;
+ if(OperatingSystem.IsOSPlatform("Windows"))
+ {
+ configDir = "C:/TDengine/cfg";
+ }
+ else if(OperatingSystem.IsOSPlatform("Linux"))
+ {
+ configDir = "/etc/taos";
+ }
+ else if(OperatingSystem.IsOSPlatform("macOS"))
+ {
+ configDir = "/etc/taos";
+ }
+ return configDir;
+ }
+
+ public static IntPtr ExecuteQuery(IntPtr conn, String sql)
+ {
+ IntPtr res = TDengine.Query(conn, sql);
+ if (!IsValidResult(res))
+ {
+ Console.Write(sql.ToString() + " failure, ");
+ ExitProgram();
+ }
+ else
+ {
+ Console.WriteLine(sql.ToString() + " success");
+ }
+ return res;
+ }
+
+ public static IntPtr ExecuteErrorQuery(IntPtr conn, String sql)
+ {
+ IntPtr res = TDengine.Query(conn, sql);
+ if (!IsValidResult(res))
+ {
+ Console.Write(sql.ToString() + " failure, ");
+ ExitProgram();
+ }
+ else
+ {
+ Console.WriteLine(sql.ToString() + " success");
+
+ }
+ return res;
+ }
+
+ public static void ExecuteUpdate(IntPtr conn, String sql)
+ {
+ IntPtr res = TDengine.Query(conn, sql);
+ if (!IsValidResult(res))
+ {
+ Console.Write(sql.ToString() + " failure, ");
+ ExitProgram();
+ }
+ else
+ {
+ Console.WriteLine(sql.ToString() + " success");
+
+ }
+ TDengine.FreeResult(res);
+ }
+
+
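+ // A result is valid only when the pointer is non-null and TDengine reports no error.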
+ public static bool IsValidResult(IntPtr res)
+ {
+ if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0))
+ {
+ if (res != IntPtr.Zero)
+ {
+ Console.Write("reason: " + TDengine.Error(res));
+ return false;
+ }
+ Console.WriteLine("");
+ return false;
+ }
+ return true;
+ }
+ public static void CloseConnection(IntPtr conn)
+ {
+ if (conn != IntPtr.Zero)
+ {
+ if (TDengine.Close(conn) == 0)
+ {
+ Console.WriteLine("close connection sucess");
+ }
+ else
+ {
+ Console.WriteLine("close Connection failed");
+ }
+ }
+ TDengine.Cleanup();
+ }
+ public static List<TDengineMeta> GetResField(IntPtr res)
+ {
+ List<TDengineMeta> metas = TDengine.FetchFields(res);
+ return metas;
+ }
+ public static void AssertEqual(string expectVal, string actualVal)
+ {
+ if (expectVal == actualVal)
+ {
+ Console.WriteLine("{0}=={1} pass", expectVal, actualVal);
+ }
+ else
+ {
+ Console.WriteLine("{0}=={1} failed", expectVal, actualVal);
+ ExitProgram();
+ }
+ }
+ public static void ExitProgram()
+ {
+ TDengine.Cleanup();
+ System.Environment.Exit(1);
+ }
+ public static List<Object> GetResData(IntPtr res)
+ {
+ List<Object> dataRaw = new List<Object>();
+ if (!IsValidResult(res))
+ {
+ ExitProgram();
+ }
+ List<TDengineMeta> metas = GetResField(res);
+ dataRaw = QueryRes(res, metas);
+ return dataRaw;
+ }
+
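+ // Build a TDengineMeta from a column name and a type string such as "BINARY(20)"; the numeric type codes mirror the TDengineDataType values.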
+ public static TDengineMeta ConstructTDengineMeta(string name, string type)
+ {
+
+ TDengineMeta _meta = new TDengineMeta();
+ _meta.name = name;
+ char[] separators = new char[] { '(', ')' };
+ string[] subs = type.Split(separators, StringSplitOptions.RemoveEmptyEntries);
+
+ switch (subs[0].ToUpper())
+ {
+ case "BOOL":
+ _meta.type = 1;
+ _meta.size = 1;
+ break;
+ case "TINYINT":
+ _meta.type = 2;
+ _meta.size = 1;
+ break;
+ case "SMALLINT":
+ _meta.type = 3;
+ _meta.size = 2;
+ break;
+ case "INT":
+ _meta.type = 4;
+ _meta.size = 4;
+ break;
+ case "BIGINT":
+ _meta.type = 5;
+ _meta.size = 8;
+ break;
+ case "TINYINT UNSIGNED":
+ _meta.type = 11;
+ _meta.size = 1;
+ break;
+ case "SMALLINT UNSIGNED":
+ _meta.type = 12;
+ _meta.size = 2;
+ break;
+ case "INT UNSIGNED":
+ _meta.type = 13;
+ _meta.size = 4;
+ break;
+ case "BIGINT UNSIGNED":
+ _meta.type = 14;
+ _meta.size = 8;
+ break;
+ case "FLOAT":
+ _meta.type = 6;
+ _meta.size = 4;
+ break;
+ case "DOUBLE":
+ _meta.type = 7;
+ _meta.size = 8;
+ break;
+ case "BINARY":
+ _meta.type = 8;
+ _meta.size = short.Parse(subs[1]);
+ break;
+ case "TIMESTAMP":
+ _meta.type = 9;
+ _meta.size = 8;
+ break;
+ case "NCHAR":
+ _meta.type = 10;
+ _meta.size = short.Parse(subs[1]);
+ break;
+ case "JSON":
+ _meta.type = 15;
+ _meta.size = 4096;
+ break;
+ default:
+ _meta.type = byte.MaxValue;
+ _meta.size = 0;
+ break;
+ }
+ return _meta;
+ }
+
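+ // Fetch every row of the result, flatten all column values row by row into one object list, then free the result.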
+ private static List<Object> QueryRes(IntPtr res, List<TDengineMeta> metas)
+ {
+ IntPtr rowdata;
+ long queryRows = 0;
+ List<Object> dataRaw = new List<Object>();
+ int fieldCount = metas.Count;
+ while ((rowdata = TDengine.FetchRows(res)) != IntPtr.Zero)
+ {
+ queryRows++;
+ IntPtr colLengthPtr = TDengine.FetchLengths(res);
+ int[] colLengthArr = new int[fieldCount];
+ Marshal.Copy(colLengthPtr, colLengthArr, 0, fieldCount);
+
+ for (int fields = 0; fields < fieldCount; ++fields)
+ {
+ TDengineMeta meta = metas[fields];
+ int offset = IntPtr.Size * fields;
+ IntPtr data = Marshal.ReadIntPtr(rowdata, offset);
+
+ if (data == IntPtr.Zero)
+ {
+ dataRaw.Add("NULL");
+ continue;
+ }
+
+ switch ((TDengineDataType)meta.type)
+ {
+ case TDengineDataType.TSDB_DATA_TYPE_BOOL:
+ bool v1 = Marshal.ReadByte(data) == 0 ? false : true;
+ dataRaw.Add(v1);
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_TINYINT:
+ sbyte v2 = (sbyte)Marshal.ReadByte(data);
+ dataRaw.Add(v2);
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_SMALLINT:
+ short v3 = Marshal.ReadInt16(data);
+ dataRaw.Add(v3);
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_INT:
+ int v4 = Marshal.ReadInt32(data);
+ dataRaw.Add(v4);
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_BIGINT:
+ long v5 = Marshal.ReadInt64(data);
+ dataRaw.Add(v5);
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_FLOAT:
+ float v6 = (float)Marshal.PtrToStructure(data, typeof(float));
+ dataRaw.Add(v6);
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_DOUBLE:
+ double v7 = (double)Marshal.PtrToStructure(data, typeof(double));
+ dataRaw.Add(v7);
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_BINARY:
+ // string v8 = Marshal.PtrToStringAnsi(data, colLengthArr[fields]);
+ string v8 = Marshal.PtrToStringUTF8(data, colLengthArr[fields]);
+ dataRaw.Add(v8);
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP:
+ long v9 = Marshal.ReadInt64(data);
+ dataRaw.Add(v9);
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_NCHAR:
+ // string v10 = Marshal.PtrToStringAnsi(data, colLengthArr[fields]);
+ string v10 = Marshal.PtrToStringUTF8(data, colLengthArr[fields]);
+ dataRaw.Add(v10);
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_UTINYINT:
+ byte v12 = Marshal.ReadByte(data);
+ dataRaw.Add(v12);
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_USMALLINT:
+ ushort v13 = (ushort)Marshal.ReadInt16(data);
+ dataRaw.Add(v13);
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_UINT:
+ uint v14 = (uint)Marshal.ReadInt32(data);
+ dataRaw.Add(v14);
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_UBIGINT:
+ ulong v15 = (ulong)Marshal.ReadInt64(data);
+ dataRaw.Add(v15);
+ break;
+ default:
+ dataRaw.Add("unknown value");
+ break;
+ }
+ }
+
+ }
+ if (TDengine.ErrorNo(res) != 0)
+ {
+ Console.Write("Query is not complete, Error {0:G}", TDengine.ErrorNo(res), TDengine.Error(res));
+ }
+ TDengine.FreeResult(res);
+ Console.WriteLine("");
+ return dataRaw;
+ }
+
+ // Generate an insert SQL statement from the column data and tag data.
+ public static string ConstructInsertSql(string table, string stable, List<Object> colData, List<Object> tagData, int numOfRows)
+ {
+ int numOfFields = colData.Count / numOfRows;
+ StringBuilder insertSql;
+
+ if (stable == "")
+ {
+ insertSql = new StringBuilder($"insert into {table} values(");
+ }
+ else
+ {
+ insertSql = new StringBuilder($"insert into {table} using {stable} tags(");
+
+ for (int j = 0; j < tagData.Count; j++)
+ {
+ if (tagData[j] is String)
+ {
+ insertSql.Append('\'');
+ insertSql.Append(tagData[j]);
+ insertSql.Append('\'');
+ }
+ else
+ {
+ insertSql.Append(tagData[j]);
+ }
+ if (j + 1 != tagData.Count)
+ {
+ insertSql.Append(',');
+ }
+ }
+
+ insertSql.Append(")values(");
+ }
+ for (int i = 0; i < colData.Count; i++)
+ {
+
+ if (colData[i] is String)
+ {
+ insertSql.Append('\'');
+ insertSql.Append(colData[i]);
+ insertSql.Append('\'');
+ }
+ else
+ {
+ insertSql.Append(colData[i]);
+ }
+
+ if ((i + 1) % numOfFields == 0 && (i + 1) != colData.Count)
+ {
+ insertSql.Append(")(");
+ }
+ else if ((i + 1) == colData.Count)
+ {
+ insertSql.Append(')');
+ }
+ else
+ {
+ insertSql.Append(',');
+ }
+ }
+ insertSql.Append(';');
+ //Console.WriteLine(insertSql.ToString());
+
+ return insertSql.ToString();
+ }
+
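+ // Interleave the tag values after each row of column data so the combined list matches one inserted row after another.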
+ public static List<Object> CombineColAndTagData(List<Object> colData, List<Object> tagData, int numOfRows)
+ {
+ var list = new List<Object>();
+ for (int i = 0; i < colData.Count; i++)
+ {
+ list.Add(colData[i]);
+ if ((i + 1) % (colData.Count / numOfRows) == 0)
+ {
+ for (int j = 0; j < tagData.Count; j++)
+ {
+ list.Add(tagData[j]);
+ }
+ }
+ }
+ return list;
+ }
+ }
+}
diff --git a/examples/C#/jsonTag/JsonTag.cs b/examples/C#/jsonTag/JsonTag.cs
index 453e54eabdc9a4ec61cdc2a061af69ed64753416..5c94df8b5a36bf20589250567e0352cfe7ef9b25 100644
--- a/examples/C#/jsonTag/JsonTag.cs
+++ b/examples/C#/jsonTag/JsonTag.cs
@@ -11,7 +11,7 @@ namespace Cases
IntPtr conn = IntPtr.Zero;
Console.WriteLine("===================JsonTagTest====================");
conn = conn = UtilsTools.TDConnection("127.0.0.1", "root", "taosdata", "", 0);
- UtilsTools.ExecuteUpdate(conn, "create database if not exists csharp_sample keep 3650");
+ UtilsTools.ExecuteUpdate(conn, "create database if not exists csharp keep 3650");
UtilsTools.ExecuteUpdate(conn, "use csharp");
JsonTagSample jsonTagSample = new JsonTagSample();
jsonTagSample.Test(conn);
diff --git a/examples/C#/jsonTag/Util.cs b/examples/C#/jsonTag/Util.cs
index 5138938df60532616e75b45d8a95597c322dfd1a..7446378fc75a4cfe49840f778961e92c37d7699d 100644
--- a/examples/C#/jsonTag/Util.cs
+++ b/examples/C#/jsonTag/Util.cs
@@ -217,10 +217,10 @@ namespace Utils
}
}
}
- public static void ExitProgram()
+ public static void ExitProgram(int i = 1)
{
TDengine.Cleanup();
- System.Environment.Exit(0);
+ System.Environment.Exit(i);
}
}
}
\ No newline at end of file
diff --git a/examples/C#/jsonTag/jsonTag.csproj b/examples/C#/jsonTag/jsonTag.csproj
index ed3af6e806f0321828742597d226011bfb4d5185..eb33d899ac803baadbcfc4f1ee4a4888ed6434ab 100644
--- a/examples/C#/jsonTag/jsonTag.csproj
+++ b/examples/C#/jsonTag/jsonTag.csproj
@@ -5,8 +5,8 @@
net5.0
-
-
+
+
diff --git a/examples/C#/schemaless/schemaless.csproj b/examples/C#/schemaless/schemaless.csproj
index d132e34589525826d5b0ff0f0055156fad2d5a38..c2369f3e8eaf82188f6c55b7bb6cf8564eb9017b 100644
--- a/examples/C#/schemaless/schemaless.csproj
+++ b/examples/C#/schemaless/schemaless.csproj
@@ -5,8 +5,8 @@
net5.0
-
-
+
+
diff --git a/examples/C#/schemaless/schemalessSample.cs b/examples/C#/schemaless/schemalessSample.cs
index f27ac352a6fc8a3fbbaf84966ae3b82e6036e91a..8d0b7f60d0dad60d382887e9c0661f72ca522c18 100644
--- a/examples/C#/schemaless/schemalessSample.cs
+++ b/examples/C#/schemaless/schemalessSample.cs
@@ -289,7 +289,7 @@ namespace TDengineDriver
static void ExitProgram()
{
- System.Environment.Exit(0);
+ System.Environment.Exit(1);
}
public void cleanup()
diff --git a/examples/C#/stmt/StmtDemo.cs b/examples/C#/stmt/StmtDemo.cs
index fdd647fdb5f9c4bb528a2e99acc6975adf4c30a3..56a5aa20f3456524d9cca4f056d5510de23d4689 100644
--- a/examples/C#/stmt/StmtDemo.cs
+++ b/examples/C#/stmt/StmtDemo.cs
@@ -543,7 +543,7 @@ namespace TDengineDriver
public static void ExitProgram()
{
TDengine.Cleanup();
- System.Environment.Exit(0);
+ System.Environment.Exit(1);
}
}
}
diff --git a/examples/c/makefile b/examples/c/makefile
index 4d6cfc1f5f3e8d4d8b0a7ce88ce285c1b3259a5a..6f0ab8880aeb3f4be9b2596edd4e819914d67617 100644
--- a/examples/c/makefile
+++ b/examples/c/makefile
@@ -7,7 +7,6 @@ LFLAGS = '-Wl,-rpath,/usr/local/taos/driver/' -ltaos -lpthread -lm -lrt
CFLAGS = -O3 -g -Wall -Wno-deprecated -fPIC -Wno-unused-result -Wconversion \
-Wno-char-subscripts -D_REENTRANT -Wno-format -D_REENTRANT -DLINUX \
-Wno-unused-function -D_M_X64 -I/usr/local/taos/include -std=gnu99 \
- -I../../../deps/cJson/inc \
-Wno-unused-function -D_M_X64 -I/usr/local/taos/include -std=gnu99 \
-fsanitize=address -fsanitize=undefined -fno-sanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=null -fno-sanitize=alignment
diff --git a/packaging/release.sh b/packaging/release.sh
index 4823c9d10b4f15de76b79f65a0b542d8e99aa5b3..0ad8d9b1bfaa09a4be51c8448c2494feff2cdbf7 100755
--- a/packaging/release.sh
+++ b/packaging/release.sh
@@ -198,6 +198,7 @@ fi
if [[ "$dbName" != "taos" ]]; then
source ${enterprise_dir}/packaging/oem/sed_$dbName.sh
replace_community_$dbName
+ replace_output_$dbName
fi
if [[ "$httpdBuild" == "true" ]]; then
@@ -224,6 +225,7 @@ if [[ "$cpuType" == "x64" ]] || [[ "$cpuType" == "aarch64" ]] || [[ "$cpuType" =
else
if [[ "$dbName" != "taos" ]]; then
replace_enterprise_$dbName
+ replace_output_$dbName
fi
cmake ../../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro}
fi
diff --git a/packaging/tools/install_arbi.sh b/packaging/tools/install_arbi.sh
index 6e9be8489b84be8c3695c67a7037a4e2be2fa9e2..31a9ce38ac2c8fe1998c73680fd48183f56c1077 100755
--- a/packaging/tools/install_arbi.sh
+++ b/packaging/tools/install_arbi.sh
@@ -76,12 +76,21 @@ elif echo $osinfo | grep -qwi "debian"; then
elif echo $osinfo | grep -qwi "Kylin"; then
# echo "This is Kylin system"
os_type=1
+elif echo $osinfo | grep -qwi "Red"; then
+ # echo "This is Red Hat system"
+ os_type=1
elif echo $osinfo | grep -qwi "centos"; then
# echo "This is centos system"
os_type=2
elif echo $osinfo | grep -qwi "fedora"; then
# echo "This is fedora system"
os_type=2
+elif echo $osinfo | grep -qwi "Linx"; then
+ # echo "This is Linx system"
+ os_type=1
+ service_mod=0
+ initd_mod=0
+ service_config_dir="/etc/systemd/system"
else
echo " osinfo: ${osinfo}"
echo " This is an officially unverified linux system,"
diff --git a/src/client/src/tscGlobalmerge.c b/src/client/src/tscGlobalmerge.c
index b8d47022b4d00cf4025904fee91ee3dba232a59c..82f08fc81da997cc1b938d8bf1d21b316ffb912c 100644
--- a/src/client/src/tscGlobalmerge.c
+++ b/src/client/src/tscGlobalmerge.c
@@ -33,12 +33,12 @@ typedef struct SCompareParam {
int32_t groupOrderType;
} SCompareParam;
-static bool needToMerge(SSDataBlock* pBlock, SArray* columnIndexList, int32_t index, char **buf) {
+static bool needToMerge(SSDataBlock* pBlock, SArray* columnIndexList, int32_t idx, char **buf) {
int32_t ret = 0;
size_t size = taosArrayGetSize(columnIndexList);
if (size > 0) {
- ret = compare_aRv(pBlock, columnIndexList, (int32_t) size, index, buf, TSDB_ORDER_ASC);
+ ret = compare_aRv(pBlock, columnIndexList, (int32_t) size, idx, buf, TSDB_ORDER_ASC);
}
// if ret == 0, means the result belongs to the same group
@@ -563,9 +563,9 @@ static void savePrevOrderColumns(char** prevRow, SArray* pColumnList, SSDataBloc
int32_t size = (int32_t) taosArrayGetSize(pColumnList);
for(int32_t i = 0; i < size; ++i) {
- SColIndex* index = taosArrayGet(pColumnList, i);
- SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, index->colIndex);
- assert(index->colId == pColInfo->info.colId);
+ SColIndex* idx = taosArrayGet(pColumnList, i);
+ SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, idx->colIndex);
+ assert(idx->colId == pColInfo->info.colId);
memcpy(prevRow[i], pColInfo->pData + pColInfo->info.bytes * rowIndex, pColInfo->info.bytes);
}
@@ -603,7 +603,7 @@ static void doMergeResultImpl(SOperatorInfo* pInfo, SQLFunctionCtx *pCtx, int32_
for (int32_t j = 0; j < numOfExpr; ++j) {
int32_t functionId = pCtx[j].functionId;
- if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) {
+ if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY || functionId == TSDB_FUNC_COL_DUMMY) {
continue;
}
@@ -625,7 +625,7 @@ static void doMergeResultImpl(SOperatorInfo* pInfo, SQLFunctionCtx *pCtx, int32_
static void doFinalizeResultImpl(SMultiwayMergeInfo* pInfo, SQLFunctionCtx *pCtx, int32_t numOfExpr) {
for(int32_t j = 0; j < numOfExpr; ++j) {
int32_t functionId = pCtx[j].functionId;
- if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) {
+ if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY || functionId == TSDB_FUNC_COL_DUMMY) {
continue;
}
diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c
index 39cbd6789f46a0151883e1d4df29c2b38b02f919..cda3b0a50af7014f1cd2df728cdb40b23cc11ba1 100644
--- a/src/client/src/tscLocal.c
+++ b/src/client/src/tscLocal.c
@@ -152,7 +152,7 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols, int32_t typeColLength,
int32_t noteColLength) {
int32_t rowLen = 0;
- SColumnIndex index = {0};
+ SColumnIndex idx = {0, 0};
pSql->cmd.numOfCols = numOfCols;
@@ -163,7 +163,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols,
tstrncpy(f.name, "Field", sizeof(f.name));
SInternalField* pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
- pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY,
+ pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &idx, TSDB_DATA_TYPE_BINARY,
(TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE, -1000, (TSDB_COL_NAME_LEN - 1), false);
rowLen += ((TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE);
@@ -173,7 +173,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols,
tstrncpy(f.name, "Type", sizeof(f.name));
pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
- pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, (int16_t)(typeColLength + VARSTR_HEADER_SIZE),
+ pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &idx, TSDB_DATA_TYPE_BINARY, (int16_t)(typeColLength + VARSTR_HEADER_SIZE),
-1000, typeColLength, false);
rowLen += typeColLength + VARSTR_HEADER_SIZE;
@@ -183,7 +183,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols,
tstrncpy(f.name, "Length", sizeof(f.name));
pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
- pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_INT, sizeof(int32_t),
+ pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &idx, TSDB_DATA_TYPE_INT, sizeof(int32_t),
-1000, sizeof(int32_t), false);
rowLen += sizeof(int32_t);
@@ -193,7 +193,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols,
tstrncpy(f.name, "Note", sizeof(f.name));
pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
- pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, (int16_t)(noteColLength + VARSTR_HEADER_SIZE),
+ pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &idx, TSDB_DATA_TYPE_BINARY, (int16_t)(noteColLength + VARSTR_HEADER_SIZE),
-1000, noteColLength, false);
rowLen += noteColLength + VARSTR_HEADER_SIZE;
@@ -415,7 +415,7 @@ static int32_t tscGetTableTagValue(SCreateBuilder *builder, char *result) {
static int32_t tscSCreateBuildResultFields(SSqlObj *pSql, BuildType type, const char *ddl) {
int32_t rowLen = 0;
int16_t ddlLen = (int16_t)strlen(ddl);
- SColumnIndex index = {0};
+ SColumnIndex idx = {0};
pSql->cmd.numOfCols = 2;
SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
@@ -433,7 +433,7 @@ static int32_t tscSCreateBuildResultFields(SSqlObj *pSql, BuildType type, const
}
SInternalField* pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
- pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, f.bytes, -1000, f.bytes - VARSTR_HEADER_SIZE, false);
+ pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &idx, TSDB_DATA_TYPE_BINARY, f.bytes, -1000, f.bytes - VARSTR_HEADER_SIZE, false);
rowLen += f.bytes;
@@ -446,7 +446,7 @@ static int32_t tscSCreateBuildResultFields(SSqlObj *pSql, BuildType type, const
}
pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
- pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY,
+ pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &idx, TSDB_DATA_TYPE_BINARY,
(int16_t)(ddlLen + VARSTR_HEADER_SIZE), -1000, ddlLen, false);
rowLen += ddlLen + VARSTR_HEADER_SIZE;
diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c
index ad209839bb24b7a34fc761a4111997db4076b4de..38aee8a6787d5ae99b1d98ec0b2fadd42208ac5a 100644
--- a/src/client/src/tscParseInsert.c
+++ b/src/client/src/tscParseInsert.c
@@ -61,8 +61,8 @@ int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, SParsedDataColIn
return TSDB_CODE_SUCCESS;
}
-int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int16_t timePrec) {
- int32_t index = 0;
+int tsParseTime(SStrToken *pToken, int64_t *pTime, char **next, char *err, int16_t timePrec) {
+ int32_t idx = 0;
SStrToken sToken;
int64_t interval;
int64_t useconds = 0;
@@ -80,8 +80,8 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1
useconds = taosStr2int64(pToken->z);
} else {
// strptime("2001-11-12 18:31:01", "%Y-%m-%d %H:%M:%S", &tm);
- if (taosParseTime(pToken->z, time, pToken->n, timePrec, tsDaylight) != TSDB_CODE_SUCCESS) {
- return tscInvalidOperationMsg(error, "invalid timestamp format", pToken->z);
+ if (taosParseTime(pToken->z, pTime, pToken->n, timePrec, tsDaylight) != TSDB_CODE_SUCCESS) {
+ return tscInvalidOperationMsg(err, "invalid timestamp format", pToken->z);
}
return TSDB_CODE_SUCCESS;
@@ -91,7 +91,7 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1
if (isspace(pToken->z[k])) continue;
if (pToken->z[k] == ',') {
*next = pTokenEnd;
- *time = useconds;
+ *pTime = useconds;
return 0;
}
@@ -103,17 +103,17 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1
* e.g., now+12a, now-5h
*/
SStrToken valueToken;
- index = 0;
- sToken = tStrGetToken(pTokenEnd, &index, false);
- pTokenEnd += index;
+ idx = 0;
+ sToken = tStrGetToken(pTokenEnd, &idx, false);
+ pTokenEnd += idx;
if (sToken.type == TK_MINUS || sToken.type == TK_PLUS) {
- index = 0;
- valueToken = tStrGetToken(pTokenEnd, &index, false);
- pTokenEnd += index;
+ idx = 0;
+ valueToken = tStrGetToken(pTokenEnd, &idx, false);
+ pTokenEnd += idx;
if (valueToken.n < 2) {
- return tscInvalidOperationMsg(error, "value expected in timestamp", sToken.z);
+ return tscInvalidOperationMsg(err, "value expected in timestamp", sToken.z);
}
char unit = 0;
@@ -130,7 +130,7 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1
*next = pTokenEnd;
}
- *time = useconds;
+ *pTime = useconds;
return TSDB_CODE_SUCCESS;
}
@@ -433,7 +433,7 @@ int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start) {
int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, int32_t *len, char *tmpTokenBuf,
SInsertStatementParam *pInsertParam) {
- int32_t index = 0;
+ int32_t idx = 0;
SStrToken sToken = {0};
char *row = pDataBlocks->pData + pDataBlocks->size; // skip the SSubmitBlk header
@@ -455,9 +455,9 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i
SSchema *pSchema = &schema[colIndex]; // get colId here
- index = 0;
- sToken = tStrGetToken(*str, &index, true);
- *str += index;
+ idx = 0;
+ sToken = tStrGetToken(*str, &idx, true);
+ *str += idx;
if (sToken.type == TK_QUESTION) {
if (!isParseBindParam) {
@@ -564,7 +564,7 @@ int32_t boundIdxCompar(const void *lhs, const void *rhs) {
int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SInsertStatementParam *pInsertParam,
int32_t* numOfRows, char *tmpTokenBuf) {
- int32_t index = 0;
+ int32_t idx = 0;
int32_t code = 0;
(*numOfRows) = 0;
@@ -584,11 +584,11 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SIn
pDataBlock->rowBuilder.rowSize = extendedRowSize;
while (1) {
- index = 0;
- sToken = tStrGetToken(*str, &index, false);
+ idx = 0;
+ sToken = tStrGetToken(*str, &idx, false);
if (sToken.n == 0 || sToken.type != TK_LP) break;
- *str += index;
+ *str += idx;
if ((*numOfRows) >= maxRows || pDataBlock->size + extendedRowSize >= pDataBlock->nAllocSize) {
int32_t tSize;
code = tscAllocateMemIfNeed(pDataBlock, extendedRowSize, &tSize);
@@ -609,13 +609,13 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SIn
pDataBlock->size += len;
- index = 0;
- sToken = tStrGetToken(*str, &index, false);
+ idx = 0;
+ sToken = tStrGetToken(*str, &idx, false);
if (sToken.n == 0 || sToken.type != TK_RP) {
return tscSQLSyntaxErrMsg(pInsertParam->msg, ") expected", *str);
}
- *str += index;
+ *str += idx;
(*numOfRows)++;
}
@@ -876,7 +876,7 @@ int validateTableName(char *tblName, int len, SStrToken* psTblToken, bool *dbInc
static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundColumn) {
- int32_t index = 0;
+ int32_t idx = 0;
SStrToken sToken = {0};
SStrToken tableToken = {0};
int32_t code = TSDB_CODE_SUCCESS;
@@ -891,14 +891,14 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
char *sql = *sqlstr;
// get the token of specified table
- index = 0;
- tableToken = tStrGetToken(sql, &index, false);
- sql += index;
+ idx = 0;
+ tableToken = tStrGetToken(sql, &idx, false);
+ sql += idx;
// skip possibly exists column list
- index = 0;
- sToken = tStrGetToken(sql, &index, false);
- sql += index;
+ idx = 0;
+ sToken = tStrGetToken(sql, &idx, false);
+ sql += idx;
int32_t numOfColList = 0;
@@ -907,8 +907,8 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
*boundColumn = &sToken.z[0];
while (1) {
- index = 0;
- sToken = tStrGetToken(sql, &index, false);
+ idx = 0;
+ sToken = tStrGetToken(sql, &idx, false);
if (sToken.type == TK_ILLEGAL) {
return tscSQLSyntaxErrMsg(pCmd->payload, "unrecognized token", sToken.z);
@@ -918,12 +918,12 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
break;
}
- sql += index;
+ sql += idx;
++numOfColList;
}
- sToken = tStrGetToken(sql, &index, false);
- sql += index;
+ sToken = tStrGetToken(sql, &idx, false);
+ sql += idx;
}
if (numOfColList == 0 && (*boundColumn) != NULL) {
@@ -933,9 +933,9 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, TABLE_INDEX);
if (sToken.type == TK_USING) { // create table if not exists according to the super table
- index = 0;
- sToken = tStrGetToken(sql, &index, false);
- sql += index;
+ idx = 0;
+ sToken = tStrGetToken(sql, &idx, false);
+ sql += idx;
if (sToken.type == TK_ILLEGAL) {
return tscSQLSyntaxErrMsg(pCmd->payload, NULL, sql);
@@ -980,8 +980,8 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
SParsedDataColInfo spd = {0};
tscSetBoundColumnInfo(&spd, pTagSchema, tscGetNumOfTags(pSTableMetaInfo->pTableMeta));
- index = 0;
- sToken = tStrGetToken(sql, &index, false);
+ idx = 0;
+ sToken = tStrGetToken(sql, &idx, false);
if (sToken.type != TK_TAGS && sToken.type != TK_LP) {
tscDestroyBoundColumnInfo(&spd);
return tscSQLSyntaxErrMsg(pInsertParam->msg, "keyword TAGS expected", sToken.z);
@@ -1002,16 +1002,16 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
sql = end;
- index = 0; // keywords of "TAGS"
- sToken = tStrGetToken(sql, &index, false);
- sql += index;
+ idx = 0; // keywords of "TAGS"
+ sToken = tStrGetToken(sql, &idx, false);
+ sql += idx;
} else {
- sql += index;
+ sql += idx;
}
- index = 0;
- sToken = tStrGetToken(sql, &index, false);
- sql += index;
+ idx = 0;
+ sToken = tStrGetToken(sql, &idx, false);
+ sql += idx;
if (sToken.type != TK_LP) {
tscDestroyBoundColumnInfo(&spd);
@@ -1027,9 +1027,9 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
for (int i = 0; i < spd.numOfBound; ++i) {
SSchema* pSchema = &pTagSchema[spd.boundedColumns[i]];
- index = 0;
- sToken = tStrGetToken(sql, &index, true);
- sql += index;
+ idx = 0;
+ sToken = tStrGetToken(sql, &idx, true);
+ sql += idx;
if (TK_ILLEGAL == sToken.type) {
tdDestroyKVRowBuilder(&kvRowBuilder);
@@ -1101,9 +1101,9 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
free(row);
pInsertParam->tagData.data = pTag;
- index = 0;
- sToken = tStrGetToken(sql, &index, false);
- sql += index;
+ idx = 0;
+ sToken = tStrGetToken(sql, &idx, false);
+ sql += idx;
if (sToken.n == 0 || sToken.type != TK_RP) {
return tscSQLSyntaxErrMsg(pInsertParam->msg, ") expected", sToken.z);
}
@@ -1112,9 +1112,9 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
* insert into table_name using super_table(tag_name1, tag_name2) tags(tag_val1, tag_val2)
* (normal_col1, normal_col2) values(normal_col1_val, normal_col2_val);
* */
- index = 0;
- sToken = tStrGetToken(sql, &index, false);
- sql += index;
+ idx = 0;
+ sToken = tStrGetToken(sql, &idx, false);
+ sql += idx;
int numOfColsAfterTags = 0;
if (sToken.type == TK_LP) {
if (*boundColumn != NULL) {
@@ -1124,18 +1124,18 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
}
while (1) {
- index = 0;
- sToken = tStrGetToken(sql, &index, false);
+ idx = 0;
+ sToken = tStrGetToken(sql, &idx, false);
if (sToken.type == TK_RP) {
break;
}
- if (sToken.n == 0 || sToken.type == TK_SEMI || index == 0) {
+ if (sToken.n == 0 || sToken.type == TK_SEMI || idx == 0) {
return tscSQLSyntaxErrMsg(pCmd->payload, "unexpected token", sql);
}
- sql += index;
+ sql += idx;
++numOfColsAfterTags;
}
@@ -1143,7 +1143,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
}
- sToken = tStrGetToken(sql, &index, false);
+ sToken = tStrGetToken(sql, &idx, false);
}
sql = sToken.z;
@@ -1213,9 +1213,9 @@ static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDat
int32_t code = TSDB_CODE_SUCCESS;
- int32_t index = 0;
- SStrToken sToken = tStrGetToken(str, &index, false);
- str += index;
+ int32_t idx = 0;
+ SStrToken sToken = tStrGetToken(str, &idx, false);
+ str += idx;
if (sToken.type != TK_LP) {
code = tscSQLSyntaxErrMsg(pInsertParam->msg, "( is expected", sToken.z);
@@ -1225,9 +1225,9 @@ static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDat
bool isOrdered = true;
int32_t lastColIdx = -1; // last column found
while (1) {
- index = 0;
- sToken = tStrGetToken(str, &index, false);
- str += index;
+ idx = 0;
+ sToken = tStrGetToken(str, &idx, false);
+ str += idx;
char tmpTokenBuf[TSDB_MAX_BYTES_PER_ROW] = {0}; // used for deleting Escape character backstick(`)
strncpy(tmpTokenBuf, sToken.z, sToken.n);
@@ -1404,8 +1404,8 @@ int tsParseInsertSql(SSqlObj *pSql) {
tscDebug("0x%"PRIx64" create data block list hashList:%p", pSql->self, pInsertParam->pTableBlockHashList);
while (1) {
- int32_t index = 0;
- SStrToken sToken = tStrGetToken(str, &index, false);
+ int32_t idx = 0;
+ SStrToken sToken = tStrGetToken(str, &idx, false);
// no data in the sql string anymore.
if (sToken.n == 0) {
@@ -1469,9 +1469,9 @@ int tsParseInsertSql(SSqlObj *pSql) {
goto _clean;
}
- index = 0;
- sToken = tStrGetToken(str, &index, false);
- str += index;
+ idx = 0;
+ sToken = tStrGetToken(str, &idx, false);
+ str += idx;
if (sToken.n == 0 || (sToken.type != TK_FILE && sToken.type != TK_VALUES)) {
code = tscSQLSyntaxErrMsg(pInsertParam->msg, "keyword VALUES or FILE required", sToken.z);
@@ -1484,13 +1484,13 @@ int tsParseInsertSql(SSqlObj *pSql) {
goto _clean;
}
- index = 0;
- sToken = tStrGetToken(str, &index, false);
+ idx = 0;
+ sToken = tStrGetToken(str, &idx, false);
if (sToken.type != TK_STRING && sToken.type != TK_ID) {
code = tscSQLSyntaxErrMsg(pInsertParam->msg, "file path is required following keyword FILE", sToken.z);
goto _clean;
}
- str += index;
+ str += idx;
if (sToken.n == 0) {
code = tscSQLSyntaxErrMsg(pInsertParam->msg, "file path is required following keyword FILE", sToken.z);
goto _clean;
@@ -1590,7 +1590,7 @@ int tsInsertInitialCheck(SSqlObj *pSql) {
return TSDB_CODE_TSC_NO_WRITE_AUTH;
}
- int32_t index = 0;
+ int32_t idx = 0;
SSqlCmd *pCmd = &pSql->cmd;
pCmd->count = 0;
@@ -1600,12 +1600,12 @@ int tsInsertInitialCheck(SSqlObj *pSql) {
SQueryInfo *pQueryInfo = tscGetQueryInfoS(pCmd);
TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT);
- SStrToken sToken = tStrGetToken(pSql->sqlstr, &index, false);
+ SStrToken sToken = tStrGetToken(pSql->sqlstr, &idx, false);
if (sToken.type != TK_INSERT && sToken.type != TK_IMPORT) {
return tscSQLSyntaxErrMsg(pInsertParam->msg, NULL, sToken.z);
}
- sToken = tStrGetToken(pSql->sqlstr, &index, false);
+ sToken = tStrGetToken(pSql->sqlstr, &idx, false);
if (sToken.type != TK_INTO) {
return tscSQLSyntaxErrMsg(pInsertParam->msg, "keyword INTO is expected", sToken.z);
}
diff --git a/src/client/src/tscParseLineProtocol.c b/src/client/src/tscParseLineProtocol.c
index 66280c32e8bc9bfba69d6521453ff5d965e949be..8a1eb88e933ab2376f014088fcf47fc495bbebef 100644
--- a/src/client/src/tscParseLineProtocol.c
+++ b/src/client/src/tscParseLineProtocol.c
@@ -1966,14 +1966,14 @@ int32_t convertSmlTimeStamp(TAOS_SML_KV *pVal, char *value,
return TSDB_CODE_SUCCESS;
}
-static int32_t parseSmlTimeStamp(TAOS_SML_KV **pTS, const char **index, SSmlLinesInfo* info) {
+static int32_t parseSmlTimeStamp(TAOS_SML_KV **pTS, const char **idx, SSmlLinesInfo* info) {
const char *start, *cur;
int32_t ret = TSDB_CODE_SUCCESS;
int len = 0;
char key[] = "ts";
char *value = NULL;
- start = cur = *index;
+ start = cur = *idx;
*pTS = calloc(1, sizeof(TAOS_SML_KV));
while(*cur != '\0') {
@@ -2013,8 +2013,8 @@ bool checkDuplicateKey(char *key, SHashObj *pHash, SSmlLinesInfo* info) {
return false;
}
-static int32_t parseSmlKey(TAOS_SML_KV *pKV, const char **index, SHashObj *pHash, SSmlLinesInfo* info) {
- const char *cur = *index;
+static int32_t parseSmlKey(TAOS_SML_KV *pKV, const char **idx, SHashObj *pHash, SSmlLinesInfo* info) {
+ const char *cur = *idx;
char key[TSDB_COL_NAME_LEN + 1]; // +1 to avoid key[len] over write
int16_t len = 0;
@@ -2048,12 +2048,12 @@ static int32_t parseSmlKey(TAOS_SML_KV *pKV, const char **index, SHashObj *pHash
memcpy(pKV->key, key, len + 1);
addEscapeCharToString(pKV->key, len);
tscDebug("SML:0x%"PRIx64" Key:%s|len:%d", info->id, pKV->key, len);
- *index = cur + 1;
+ *idx = cur + 1;
return TSDB_CODE_SUCCESS;
}
-static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index,
+static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **idx,
bool *is_last_kv, SSmlLinesInfo* info, bool isTag) {
const char *start, *cur;
int32_t ret = TSDB_CODE_SUCCESS;
@@ -2077,7 +2077,7 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index,
val_rqoute
} val_state;
- start = cur = *index;
+ start = cur = *idx;
tag_state = tag_common;
val_state = val_common;
@@ -2100,17 +2100,17 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index,
}
if (*cur == '"') {
- if (cur == *index) {
+ if (cur == *idx) {
tag_state = tag_lqoute;
}
cur += 1;
len += 1;
break;
} else if (*cur == 'L') {
- line_len = strlen(*index);
+ line_len = strlen(*idx);
/* common character at the end */
- if (cur + 1 >= *index + line_len) {
+ if (cur + 1 >= *idx + line_len) {
*is_last_kv = true;
kv_done = true;
break;
@@ -2118,7 +2118,7 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index,
if (*(cur + 1) == '"') {
/* string starts here */
- if (cur + 1 == *index + 1) {
+ if (cur + 1 == *idx + 1) {
tag_state = tag_lqoute;
}
cur += 2;
@@ -2224,7 +2224,7 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index,
}
if (*cur == '"') {
- if (cur == *index) {
+ if (cur == *idx) {
val_state = val_lqoute;
} else {
if (*(cur - 1) != '\\') {
@@ -2238,10 +2238,10 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index,
len += 1;
break;
} else if (*cur == 'L') {
- line_len = strlen(*index);
+ line_len = strlen(*idx);
/* common character at the end */
- if (cur + 1 >= *index + line_len) {
+ if (cur + 1 >= *idx + line_len) {
*is_last_kv = true;
kv_done = true;
break;
@@ -2249,13 +2249,13 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index,
if (*(cur + 1) == '"') {
/* string starts here */
- if (cur + 1 == *index + 1) {
+ if (cur + 1 == *idx + 1) {
val_state = val_lqoute;
cur += 2;
len += 2;
} else {
/* MUST at the end of string */
- if (cur + 2 >= *index + line_len) {
+ if (cur + 2 >= *idx + line_len) {
cur += 2;
len += 2;
*is_last_kv = true;
@@ -2385,7 +2385,7 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index,
}
free(value);
- *index = (*cur == '\0') ? cur : cur + 1;
+ *idx = (*cur == '\0') ? cur : cur + 1;
return ret;
error:
@@ -2395,9 +2395,9 @@ error:
return ret;
}
-static int32_t parseSmlMeasurement(TAOS_SML_DATA_POINT *pSml, const char **index,
+static int32_t parseSmlMeasurement(TAOS_SML_DATA_POINT *pSml, const char **idx,
uint8_t *has_tags, SSmlLinesInfo* info) {
- const char *cur = *index;
+ const char *cur = *idx;
int16_t len = 0;
pSml->stableName = calloc(TSDB_TABLE_NAME_LEN + TS_BACKQUOTE_CHAR_SIZE, 1);
@@ -2441,7 +2441,7 @@ static int32_t parseSmlMeasurement(TAOS_SML_DATA_POINT *pSml, const char **index
return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
}
addEscapeCharToString(pSml->stableName, len);
- *index = cur + 1;
+ *idx = cur + 1;
tscDebug("SML:0x%"PRIx64" Stable name in measurement:%s|len:%d", info->id, pSml->stableName, len);
return TSDB_CODE_SUCCESS;
@@ -2464,10 +2464,10 @@ int32_t isValidChildTableName(const char *pTbName, int16_t len, SSmlLinesInfo* i
static int32_t parseSmlKvPairs(TAOS_SML_KV **pKVs, int *num_kvs,
- const char **index, bool isField,
+ const char **idx, bool isField,
TAOS_SML_DATA_POINT* smlData, SHashObj *pHash,
SSmlLinesInfo* info) {
- const char *cur = *index;
+ const char *cur = *idx;
int32_t ret = TSDB_CODE_SUCCESS;
TAOS_SML_KV *pkv;
bool is_last_kv = false;
@@ -2555,7 +2555,7 @@ static int32_t parseSmlKvPairs(TAOS_SML_KV **pKVs, int *num_kvs,
error:
return ret;
done:
- *index = cur;
+ *idx = cur;
return ret;
}
@@ -2575,13 +2575,13 @@ static void moveTimeStampToFirstKv(TAOS_SML_DATA_POINT** smlData, TAOS_SML_KV *t
}
int32_t tscParseLine(const char* sql, TAOS_SML_DATA_POINT* smlData, SSmlLinesInfo* info) {
- const char* index = sql;
+ const char* idx = sql;
int32_t ret = TSDB_CODE_SUCCESS;
uint8_t has_tags = 0;
TAOS_SML_KV *timestamp = NULL;
SHashObj *keyHashTable = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false);
- ret = parseSmlMeasurement(smlData, &index, &has_tags, info);
+ ret = parseSmlMeasurement(smlData, &idx, &has_tags, info);
if (ret) {
tscError("SML:0x%"PRIx64" Unable to parse measurement", info->id);
taosHashCleanup(keyHashTable);
@@ -2591,7 +2591,7 @@ int32_t tscParseLine(const char* sql, TAOS_SML_DATA_POINT* smlData, SSmlLinesInf
//Parse Tags
if (has_tags) {
- ret = parseSmlKvPairs(&smlData->tags, &smlData->tagNum, &index, false, smlData, keyHashTable, info);
+ ret = parseSmlKvPairs(&smlData->tags, &smlData->tagNum, &idx, false, smlData, keyHashTable, info);
if (ret) {
tscError("SML:0x%"PRIx64" Unable to parse tag", info->id);
taosHashCleanup(keyHashTable);
@@ -2601,7 +2601,7 @@ int32_t tscParseLine(const char* sql, TAOS_SML_DATA_POINT* smlData, SSmlLinesInf
tscDebug("SML:0x%"PRIx64" Parse tags finished, num of tags:%d", info->id, smlData->tagNum);
//Parse fields
- ret = parseSmlKvPairs(&smlData->fields, &smlData->fieldNum, &index, true, smlData, keyHashTable, info);
+ ret = parseSmlKvPairs(&smlData->fields, &smlData->fieldNum, &idx, true, smlData, keyHashTable, info);
if (ret) {
tscError("SML:0x%"PRIx64" Unable to parse field", info->id);
taosHashCleanup(keyHashTable);
@@ -2611,7 +2611,7 @@ int32_t tscParseLine(const char* sql, TAOS_SML_DATA_POINT* smlData, SSmlLinesInf
taosHashCleanup(keyHashTable);
//Parse timestamp
- ret = parseSmlTimeStamp(&timestamp, &index, info);
+ ret = parseSmlTimeStamp(&timestamp, &idx, info);
if (ret) {
tscError("SML:0x%"PRIx64" Unable to parse timestamp", info->id);
return ret;
diff --git a/src/client/src/tscParseOpenTSDB.c b/src/client/src/tscParseOpenTSDB.c
index 4b2738e567d7535bba170d390200b73cf794a4f2..525bfa4bd3ac1cdbb43d68ef4fa3697bd9b20ac3 100644
--- a/src/client/src/tscParseOpenTSDB.c
+++ b/src/client/src/tscParseOpenTSDB.c
@@ -33,8 +33,8 @@ static uint64_t genUID() {
return id;
}
-static int32_t parseTelnetMetric(TAOS_SML_DATA_POINT *pSml, const char **index, SSmlLinesInfo* info) {
- const char *cur = *index;
+static int32_t parseTelnetMetric(TAOS_SML_DATA_POINT *pSml, const char **idx, SSmlLinesInfo* info) {
+ const char *cur = *idx;
uint16_t len = 0;
pSml->stableName = tcalloc(TSDB_TABLE_NAME_LEN + TS_BACKQUOTE_CHAR_SIZE, 1);
@@ -76,13 +76,13 @@ static int32_t parseTelnetMetric(TAOS_SML_DATA_POINT *pSml, const char **index,
}
addEscapeCharToString(pSml->stableName, len);
- *index = cur + 1;
+ *idx = cur + 1;
tscDebug("OTD:0x%"PRIx64" Stable name in metric:%s|len:%d", info->id, pSml->stableName, len);
return TSDB_CODE_SUCCESS;
}
-static int32_t parseTelnetTimeStamp(TAOS_SML_KV **pTS, int *num_kvs, const char **index, SSmlLinesInfo* info) {
+static int32_t parseTelnetTimeStamp(TAOS_SML_KV **pTS, int *num_kvs, const char **idx, SSmlLinesInfo* info) {
//Timestamp must be the first KV to parse
assert(*num_kvs == 0);
@@ -92,7 +92,7 @@ static int32_t parseTelnetTimeStamp(TAOS_SML_KV **pTS, int *num_kvs, const char
char key[] = OTD_TIMESTAMP_COLUMN_NAME;
char *value = NULL;
- start = cur = *index;
+ start = cur = *idx;
//allocate fields for timestamp and value
*pTS = tcalloc(OTD_MAX_FIELDS_NUM, sizeof(TAOS_SML_KV));
@@ -130,12 +130,12 @@ static int32_t parseTelnetTimeStamp(TAOS_SML_KV **pTS, int *num_kvs, const char
addEscapeCharToString((*pTS)->key, (int32_t)strlen(key));
*num_kvs += 1;
- *index = cur + 1;
+ *idx = cur + 1;
return ret;
}
-static int32_t parseTelnetMetricValue(TAOS_SML_KV **pKVs, int *num_kvs, const char **index, SSmlLinesInfo* info) {
+static int32_t parseTelnetMetricValue(TAOS_SML_KV **pKVs, int *num_kvs, const char **idx, SSmlLinesInfo* info) {
//skip timestamp
TAOS_SML_KV *pVal = *pKVs + 1;
const char *start, *cur;
@@ -145,7 +145,7 @@ static int32_t parseTelnetMetricValue(TAOS_SML_KV **pKVs, int *num_kvs, const ch
char key[] = OTD_METRIC_VALUE_COLUMN_NAME;
char *value = NULL;
- start = cur = *index;
+ start = cur = *idx;
//if metric value is string
if (*cur == '"') {
@@ -201,12 +201,12 @@ static int32_t parseTelnetMetricValue(TAOS_SML_KV **pKVs, int *num_kvs, const ch
addEscapeCharToString(pVal->key, (int32_t)strlen(pVal->key));
*num_kvs += 1;
- *index = cur + 1;
+ *idx = cur + 1;
return ret;
}
-static int32_t parseTelnetTagKey(TAOS_SML_KV *pKV, const char **index, SHashObj *pHash, SSmlLinesInfo* info) {
- const char *cur = *index;
+static int32_t parseTelnetTagKey(TAOS_SML_KV *pKV, const char **idx, SHashObj *pHash, SSmlLinesInfo* info) {
+ const char *cur = *idx;
char key[TSDB_COL_NAME_LEN];
uint16_t len = 0;
@@ -244,17 +244,17 @@ static int32_t parseTelnetTagKey(TAOS_SML_KV *pKV, const char **index, SHashObj
memcpy(pKV->key, key, len + 1);
addEscapeCharToString(pKV->key, len);
//tscDebug("OTD:0x%"PRIx64" Key:%s|len:%d", info->id, pKV->key, len);
- *index = cur + 1;
+ *idx = cur + 1;
return TSDB_CODE_SUCCESS;
}
-static int32_t parseTelnetTagValue(TAOS_SML_KV *pKV, const char **index,
+static int32_t parseTelnetTagValue(TAOS_SML_KV *pKV, const char **idx,
bool *is_last_kv, SSmlLinesInfo* info) {
const char *start, *cur;
char *value = NULL;
uint16_t len = 0;
- start = cur = *index;
+ start = cur = *idx;
while (1) {
// whitespace or '\0' identifies a value
@@ -290,14 +290,14 @@ static int32_t parseTelnetTagValue(TAOS_SML_KV *pKV, const char **index,
}
tfree(value);
- *index = (*cur == '\0') ? cur : cur + 1;
+ *idx = (*cur == '\0') ? cur : cur + 1;
return TSDB_CODE_SUCCESS;
}
static int32_t parseTelnetTagKvs(TAOS_SML_KV **pKVs, int *num_kvs,
- const char **index, char **childTableName,
+ const char **idx, char **childTableName,
SHashObj *pHash, SSmlLinesInfo* info) {
- const char *cur = *index;
+ const char *cur = *idx;
int32_t ret = TSDB_CODE_SUCCESS;
TAOS_SML_KV *pkv;
bool is_last_kv = false;
@@ -357,11 +357,11 @@ static int32_t parseTelnetTagKvs(TAOS_SML_KV **pKVs, int *num_kvs,
}
static int32_t tscParseTelnetLine(const char* line, TAOS_SML_DATA_POINT* smlData, SSmlLinesInfo* info) {
- const char* index = line;
+ const char* idx = line;
int32_t ret = TSDB_CODE_SUCCESS;
//Parse metric
- ret = parseTelnetMetric(smlData, &index, info);
+ ret = parseTelnetMetric(smlData, &idx, info);
if (ret) {
tscError("OTD:0x%"PRIx64" Unable to parse metric", info->id);
return ret;
@@ -369,7 +369,7 @@ static int32_t tscParseTelnetLine(const char* line, TAOS_SML_DATA_POINT* smlData
tscDebug("OTD:0x%"PRIx64" Parse metric finished", info->id);
//Parse timestamp
- ret = parseTelnetTimeStamp(&smlData->fields, &smlData->fieldNum, &index, info);
+ ret = parseTelnetTimeStamp(&smlData->fields, &smlData->fieldNum, &idx, info);
if (ret) {
tscError("OTD:0x%"PRIx64" Unable to parse timestamp", info->id);
return ret;
@@ -377,7 +377,7 @@ static int32_t tscParseTelnetLine(const char* line, TAOS_SML_DATA_POINT* smlData
tscDebug("OTD:0x%"PRIx64" Parse timestamp finished", info->id);
//Parse value
- ret = parseTelnetMetricValue(&smlData->fields, &smlData->fieldNum, &index, info);
+ ret = parseTelnetMetricValue(&smlData->fields, &smlData->fieldNum, &idx, info);
if (ret) {
tscError("OTD:0x%"PRIx64" Unable to parse metric value", info->id);
return ret;
@@ -386,7 +386,7 @@ static int32_t tscParseTelnetLine(const char* line, TAOS_SML_DATA_POINT* smlData
//Parse tagKVs
SHashObj *keyHashTable = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false);
- ret = parseTelnetTagKvs(&smlData->tags, &smlData->tagNum, &index, &smlData->childTableName, keyHashTable, info);
+ ret = parseTelnetTagKvs(&smlData->tags, &smlData->tagNum, &idx, &smlData->childTableName, keyHashTable, info);
if (ret) {
tscError("OTD:0x%"PRIx64" Unable to parse tags", info->id);
taosHashCleanup(keyHashTable);
diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c
index 454f15829874a51a38428f3ffb420e4704e2151b..665efa4c6dbca0437540ee1dd9875267bc2d8b72 100644
--- a/src/client/src/tscPrepare.c
+++ b/src/client/src/tscPrepare.c
@@ -121,11 +121,11 @@ static int normalStmtAddPart(SNormalStmt* stmt, bool isParam, char* str, uint32_
return TSDB_CODE_SUCCESS;
}
-static int normalStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) {
+static int normalStmtBindParam(STscStmt* stmt, TAOS_BIND* pBind) {
SNormalStmt* normal = &stmt->normal;
for (uint16_t i = 0; i < normal->numParams; ++i) {
- TAOS_BIND* tb = bind + i;
+ TAOS_BIND* tb = pBind + i;
tVariant* var = normal->params + i;
tVariantDestroy(var);
@@ -383,8 +383,8 @@ int32_t fillTablesColumnsNull(SSqlObj* pSql) {
////////////////////////////////////////////////////////////////////////////////
// functions for insertion statement preparation
-static FORCE_INLINE int doBindParam(STableDataBlocks* pBlock, char* data, SParamInfo* param, TAOS_BIND* bind, int32_t colNum) {
- if (bind->is_null != NULL && *(bind->is_null)) {
+static FORCE_INLINE int doBindParam(STableDataBlocks* pBlock, char* data, SParamInfo* param, TAOS_BIND* pBind, int32_t colNum) {
+ if (pBind->is_null != NULL && *(pBind->is_null)) {
setNull(data + param->offset, param->type, param->bytes);
return TSDB_CODE_SUCCESS;
}
@@ -772,7 +772,7 @@ static FORCE_INLINE int doBindParam(STableDataBlocks* pBlock, char* data, SParam
}
#endif
- if (bind->buffer_type != param->type) {
+ if (pBind->buffer_type != param->type) {
tscError("column type mismatch");
return TSDB_CODE_TSC_INVALID_VALUE;
}
@@ -782,39 +782,39 @@ static FORCE_INLINE int doBindParam(STableDataBlocks* pBlock, char* data, SParam
case TSDB_DATA_TYPE_BOOL:
case TSDB_DATA_TYPE_TINYINT:
case TSDB_DATA_TYPE_UTINYINT:
- *(uint8_t *)(data + param->offset) = *(uint8_t *)bind->buffer;
+ *(uint8_t *)(data + param->offset) = *(uint8_t *)pBind->buffer;
break;
case TSDB_DATA_TYPE_SMALLINT:
case TSDB_DATA_TYPE_USMALLINT:
- *(uint16_t *)(data + param->offset) = *(uint16_t *)bind->buffer;
+ *(uint16_t *)(data + param->offset) = *(uint16_t *)pBind->buffer;
break;
case TSDB_DATA_TYPE_INT:
case TSDB_DATA_TYPE_UINT:
case TSDB_DATA_TYPE_FLOAT:
- *(uint32_t *)(data + param->offset) = *(uint32_t *)bind->buffer;
+ *(uint32_t *)(data + param->offset) = *(uint32_t *)pBind->buffer;
break;
case TSDB_DATA_TYPE_BIGINT:
case TSDB_DATA_TYPE_UBIGINT:
case TSDB_DATA_TYPE_DOUBLE:
case TSDB_DATA_TYPE_TIMESTAMP:
- *(uint64_t *)(data + param->offset) = *(uint64_t *)bind->buffer;
+ *(uint64_t *)(data + param->offset) = *(uint64_t *)pBind->buffer;
break;
case TSDB_DATA_TYPE_BINARY:
- if ((*bind->length) > (uintptr_t)param->bytes) {
+ if ((*pBind->length) > (uintptr_t)param->bytes) {
tscError("column length is too big");
return TSDB_CODE_TSC_INVALID_VALUE;
}
- size = (short)*bind->length;
- STR_WITH_SIZE_TO_VARSTR(data + param->offset, bind->buffer, size);
+ size = (short)*pBind->length;
+ STR_WITH_SIZE_TO_VARSTR(data + param->offset, pBind->buffer, size);
return TSDB_CODE_SUCCESS;
case TSDB_DATA_TYPE_NCHAR: {
int32_t output = 0;
- if (!taosMbsToUcs4(bind->buffer, *bind->length, varDataVal(data + param->offset), param->bytes - VARSTR_HEADER_SIZE, &output)) {
+ if (!taosMbsToUcs4(pBind->buffer, *pBind->length, varDataVal(data + param->offset), param->bytes - VARSTR_HEADER_SIZE, &output)) {
tscError("convert nchar failed");
return TSDB_CODE_TSC_INVALID_VALUE;
}
@@ -889,27 +889,27 @@ static int32_t insertStmtGenBlock(STscStmt* pStmt, STableDataBlocks** pBlock, ST
}
-static int doBindBatchParam(STableDataBlocks* pBlock, SParamInfo* param, TAOS_MULTI_BIND* bind, int32_t rowNum) {
- if (bind->buffer_type != param->type || !isValidDataType(param->type)) {
+static int doBindBatchParam(STableDataBlocks* pBlock, SParamInfo* param, TAOS_MULTI_BIND* pBind, int32_t rowNum) {
+ if (pBind->buffer_type != param->type || !isValidDataType(param->type)) {
tscError("column mismatch or invalid");
return TSDB_CODE_TSC_INVALID_VALUE;
}
- if (IS_VAR_DATA_TYPE(param->type) && bind->length == NULL) {
+ if (IS_VAR_DATA_TYPE(param->type) && pBind->length == NULL) {
tscError("BINARY/NCHAR no length");
return TSDB_CODE_TSC_INVALID_VALUE;
}
- for (int i = 0; i < bind->num; ++i) {
+ for (int i = 0; i < pBind->num; ++i) {
char* data = pBlock->pData + sizeof(SSubmitBlk) + pBlock->rowSize * (rowNum + i);
- if (bind->is_null != NULL && bind->is_null[i]) {
+ if (pBind->is_null != NULL && pBind->is_null[i]) {
setNull(data + param->offset, param->type, param->bytes);
continue;
}
if (!IS_VAR_DATA_TYPE(param->type)) {
- memcpy(data + param->offset, (char *)bind->buffer + bind->buffer_length * i, tDataTypes[param->type].bytes);
+ memcpy(data + param->offset, (char *)pBind->buffer + pBind->buffer_length * i, tDataTypes[param->type].bytes);
if (param->offset == 0) {
if (tsCheckTimestamp(pBlock, data + param->offset) != TSDB_CODE_SUCCESS) {
@@ -918,21 +918,21 @@ static int doBindBatchParam(STableDataBlocks* pBlock, SParamInfo* param, TAOS_MU
}
}
} else if (param->type == TSDB_DATA_TYPE_BINARY) {
- if (bind->length[i] > (uintptr_t)param->bytes) {
- tscError("binary length too long, ignore it, max:%d, actual:%d", param->bytes, (int32_t)bind->length[i]);
+ if (pBind->length[i] > (uintptr_t)param->bytes) {
+ tscError("binary length too long, ignore it, max:%d, actual:%d", param->bytes, (int32_t)pBind->length[i]);
return TSDB_CODE_TSC_INVALID_VALUE;
}
- int16_t bsize = (short)bind->length[i];
- STR_WITH_SIZE_TO_VARSTR(data + param->offset, (char *)bind->buffer + bind->buffer_length * i, bsize);
+ int16_t bsize = (short)pBind->length[i];
+ STR_WITH_SIZE_TO_VARSTR(data + param->offset, (char *)pBind->buffer + pBind->buffer_length * i, bsize);
} else if (param->type == TSDB_DATA_TYPE_NCHAR) {
- if (bind->length[i] > (uintptr_t)param->bytes) {
- tscError("nchar string length too long, ignore it, max:%d, actual:%d", param->bytes, (int32_t)bind->length[i]);
+ if (pBind->length[i] > (uintptr_t)param->bytes) {
+ tscError("nchar string length too long, ignore it, max:%d, actual:%d", param->bytes, (int32_t)pBind->length[i]);
return TSDB_CODE_TSC_INVALID_VALUE;
}
int32_t output = 0;
- if (!taosMbsToUcs4((char *)bind->buffer + bind->buffer_length * i, bind->length[i], varDataVal(data + param->offset), param->bytes - VARSTR_HEADER_SIZE, &output)) {
- tscError("convert nchar string to UCS4_LE failed:%s", (char*)((char *)bind->buffer + bind->buffer_length * i));
+ if (!taosMbsToUcs4((char *)pBind->buffer + pBind->buffer_length * i, pBind->length[i], varDataVal(data + param->offset), param->bytes - VARSTR_HEADER_SIZE, &output)) {
+ tscError("convert nchar string to UCS4_LE failed:%s", (char*)((char *)pBind->buffer + pBind->buffer_length * i));
return TSDB_CODE_TSC_INVALID_VALUE;
}
@@ -943,7 +943,7 @@ static int doBindBatchParam(STableDataBlocks* pBlock, SParamInfo* param, TAOS_MU
return TSDB_CODE_SUCCESS;
}
-static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) {
+static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* pBind) {
SSqlCmd* pCmd = &stmt->pSql->cmd;
STscStmt* pStmt = (STscStmt*)stmt;
@@ -995,7 +995,7 @@ static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) {
for (uint32_t j = 0; j < pBlock->numOfParams; ++j) {
SParamInfo* param = &pBlock->params[j];
- int code = doBindParam(pBlock, data, param, &bind[param->idx], 1);
+ int code = doBindParam(pBlock, data, param, &pBind[param->idx], 1);
if (code != TSDB_CODE_SUCCESS) {
tscDebug("0x%"PRIx64" bind column %d: type mismatch or invalid", pStmt->pSql->self, param->idx);
return invalidOperationMsg(tscGetErrorMsgPayload(&stmt->pSql->cmd), "bind column type mismatch or invalid");
@@ -1006,10 +1006,10 @@ static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) {
}
-static int insertStmtBindParamBatch(STscStmt* stmt, TAOS_MULTI_BIND* bind, int colIdx) {
+static int insertStmtBindParamBatch(STscStmt* stmt, TAOS_MULTI_BIND* pBind, int colIdx) {
SSqlCmd* pCmd = &stmt->pSql->cmd;
STscStmt* pStmt = (STscStmt*)stmt;
- int rowNum = bind->num;
+ int rowNum = pBind->num;
STableDataBlocks* pBlock = NULL;
@@ -1063,12 +1063,12 @@ static int insertStmtBindParamBatch(STscStmt* stmt, TAOS_MULTI_BIND* bind, int c
if (colIdx == -1) {
for (uint32_t j = 0; j < pBlock->numOfParams; ++j) {
SParamInfo* param = &pBlock->params[j];
- if (bind[param->idx].num != rowNum) {
- tscError("0x%"PRIx64" param %d: num[%d:%d] not match", pStmt->pSql->self, param->idx, rowNum, bind[param->idx].num);
+ if (pBind[param->idx].num != rowNum) {
+ tscError("0x%"PRIx64" param %d: num[%d:%d] not match", pStmt->pSql->self, param->idx, rowNum, pBind[param->idx].num);
return invalidOperationMsg(tscGetErrorMsgPayload(&stmt->pSql->cmd), "bind row num mismatch");
}
- int code = doBindBatchParam(pBlock, param, &bind[param->idx], pCmd->batchSize);
+ int code = doBindBatchParam(pBlock, param, &pBind[param->idx], pCmd->batchSize);
if (code != TSDB_CODE_SUCCESS) {
tscError("0x%"PRIx64" bind column %d: type mismatch or invalid", pStmt->pSql->self, param->idx);
return invalidOperationMsg(tscGetErrorMsgPayload(&stmt->pSql->cmd), "bind column type mismatch or invalid");
@@ -1079,7 +1079,7 @@ static int insertStmtBindParamBatch(STscStmt* stmt, TAOS_MULTI_BIND* bind, int c
} else {
SParamInfo* param = &pBlock->params[colIdx];
- int code = doBindBatchParam(pBlock, param, bind, pCmd->batchSize);
+ int code = doBindBatchParam(pBlock, param, pBind, pCmd->batchSize);
if (code != TSDB_CODE_SUCCESS) {
tscError("0x%"PRIx64" bind column %d: type mismatch or invalid", pStmt->pSql->self, param->idx);
return invalidOperationMsg(tscGetErrorMsgPayload(&stmt->pSql->cmd), "bind column type mismatch or invalid");
@@ -1312,8 +1312,8 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
return ret;
}
- int32_t index = 0;
- SStrToken sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
+ int32_t idx = 0;
+ SStrToken sToken = tStrGetToken(pCmd->insertParam.sql, &idx, false);
if (sToken.n == 0) {
tscError("table is is expected, sql:%s", pCmd->insertParam.sql);
return tscSQLSyntaxErrMsg(pCmd->payload, "table name is expected", pCmd->insertParam.sql);
@@ -1333,7 +1333,7 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
pStmt->mtb.tagSet = true;
- sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
+ sToken = tStrGetToken(pCmd->insertParam.sql, &idx, false);
if (sToken.n > 0 && (sToken.type == TK_VALUES || sToken.type == TK_LP)) {
return TSDB_CODE_SUCCESS;
}
@@ -1343,14 +1343,14 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
return tscSQLSyntaxErrMsg(pCmd->payload, "keywords USING is expected", sToken.z ? sToken.z : pCmd->insertParam.sql);
}
- sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
+ sToken = tStrGetToken(pCmd->insertParam.sql, &idx, false);
if (sToken.n <= 0 || ((sToken.type != TK_ID) && (sToken.type != TK_STRING))) {
tscError("invalid token, sql:%s", pCmd->insertParam.sql);
return tscSQLSyntaxErrMsg(pCmd->payload, "invalid token", sToken.z ? sToken.z : pCmd->insertParam.sql);
}
pStmt->mtb.stbname = sToken;
- sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
+ sToken = tStrGetToken(pCmd->insertParam.sql, &idx, false);
if (sToken.n <= 0 || ((sToken.type != TK_TAGS) && (sToken.type != TK_LP))) {
tscError("invalid token, sql:%s", pCmd->insertParam.sql);
return tscSQLSyntaxErrMsg(pCmd->payload, "invalid token", sToken.z ? sToken.z : pCmd->insertParam.sql);
@@ -1361,9 +1361,9 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
if (sToken.type == TK_LP) {
pStmt->mtb.tagColSet = true;
pStmt->mtb.tagCols = sToken;
- int32_t tagColsStart = index;
+ int32_t tagColsStart = idx;
while (1) {
- sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
+ sToken = tStrGetToken(pCmd->insertParam.sql, &idx, false);
if (sToken.type == TK_ILLEGAL) {
return tscSQLSyntaxErrMsg(pCmd->payload, "unrecognized token", sToken.z);
}
@@ -1378,16 +1378,16 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
tscError("tag column list expected, sql:%s", pCmd->insertParam.sql);
return tscSQLSyntaxErrMsg(pCmd->payload, "tag column list expected", pCmd->insertParam.sql);
}
- pStmt->mtb.tagCols.n = index - tagColsStart + 1;
+ pStmt->mtb.tagCols.n = idx - tagColsStart + 1;
- sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
+ sToken = tStrGetToken(pCmd->insertParam.sql, &idx, false);
if (sToken.n <= 0 || sToken.type != TK_TAGS) {
tscError("keyword TAGS expected, sql:%s", pCmd->insertParam.sql);
return tscSQLSyntaxErrMsg(pCmd->payload, "keyword TAGS expected", sToken.z ? sToken.z : pCmd->insertParam.sql);
}
}
- sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
+ sToken = tStrGetToken(pCmd->insertParam.sql, &idx, false);
if (sToken.n <= 0 || sToken.type != TK_LP) {
tscError("( expected, sql:%s", pCmd->insertParam.sql);
return tscSQLSyntaxErrMsg(pCmd->payload, "( expected", sToken.z ? sToken.z : pCmd->insertParam.sql);
@@ -1398,7 +1398,7 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
int32_t loopCont = 1;
while (loopCont) {
- sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
+ sToken = tStrGetToken(pCmd->insertParam.sql, &idx, false);
if (sToken.n <= 0) {
tscError("unexpected sql end, sql:%s", pCmd->insertParam.sql);
return tscSQLSyntaxErrMsg(pCmd->payload, "unexpected sql end", pCmd->insertParam.sql);
@@ -1429,7 +1429,7 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
return tscSQLSyntaxErrMsg(pCmd->payload, "not match tags", pCmd->insertParam.sql);
}
- sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
+ sToken = tStrGetToken(pCmd->insertParam.sql, &idx, false);
if (sToken.n <= 0 || (sToken.type != TK_VALUES && sToken.type != TK_LP)) {
tscError("sql error, sql:%s", pCmd->insertParam.sql);
return tscSQLSyntaxErrMsg(pCmd->payload, "sql error", sToken.z ? sToken.z : pCmd->insertParam.sql);
@@ -1944,7 +1944,7 @@ int taos_stmt_close(TAOS_STMT* stmt) {
STMT_RET(TSDB_CODE_SUCCESS);
}
-int taos_stmt_bind_param(TAOS_STMT* stmt, TAOS_BIND* bind) {
+int taos_stmt_bind_param(TAOS_STMT* stmt, TAOS_BIND* pBind) {
STscStmt* pStmt = (STscStmt*)stmt;
STMT_CHECK
@@ -1965,18 +1965,18 @@ int taos_stmt_bind_param(TAOS_STMT* stmt, TAOS_BIND* bind) {
tscDebug("tableId:%" PRIu64 ", try to bind one row", pStmt->mtb.currentUid);
- STMT_RET(insertStmtBindParam(pStmt, bind));
+ STMT_RET(insertStmtBindParam(pStmt, pBind));
} else {
- STMT_RET(normalStmtBindParam(pStmt, bind));
+ STMT_RET(normalStmtBindParam(pStmt, pBind));
}
}
-int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind) {
+int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* pBind) {
STscStmt* pStmt = (STscStmt*)stmt;
STMT_CHECK
- if (bind == NULL || bind->num <= 0 || bind->num > INT16_MAX) {
+ if (pBind == NULL || pBind->num <= 0 || pBind->num > INT16_MAX) {
tscError("0x%"PRIx64" invalid parameter", pStmt->pSql->self);
STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "invalid bind param"));
}
@@ -2000,21 +2000,21 @@ int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind) {
pStmt->last = STMT_BIND;
- STMT_RET(insertStmtBindParamBatch(pStmt, bind, -1));
+ STMT_RET(insertStmtBindParamBatch(pStmt, pBind, -1));
}
-int taos_stmt_bind_single_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, int colIdx) {
+int taos_stmt_bind_single_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* pBind, int colIdx) {
STscStmt* pStmt = (STscStmt*)stmt;
STMT_CHECK
- if (bind == NULL) {
+ if (pBind == NULL) {
tscError("0x%" PRIx64 " invalid parameter: bind is NULL", pStmt->pSql->self);
STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "invalid bind param: bind is NULL"));
}
- if (bind->num <= 0 || bind->num > INT16_MAX) {
+ if (pBind->num <= 0 || pBind->num > INT16_MAX) {
char errMsg[128];
- sprintf(errMsg, "invalid parameter: bind->num:%d out of range [0, %d)", bind->num, INT16_MAX);
+ sprintf(errMsg, "invalid parameter: bind->num:%d out of range [0, %d)", pBind->num, INT16_MAX);
tscError("0x%" PRIx64 " %s", pStmt->pSql->self, errMsg);
STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), errMsg));
}
@@ -2045,7 +2045,7 @@ int taos_stmt_bind_single_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, in
pStmt->last = STMT_BIND_COL;
- STMT_RET(insertStmtBindParamBatch(pStmt, bind, colIdx));
+ STMT_RET(insertStmtBindParamBatch(pStmt, pBind, colIdx));
}
int taos_stmt_add_batch(TAOS_STMT* stmt) {
diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c
index 22456782d0341c3ad35299dfa589870ec23bb85a..bb31b752a1bd5856f45dde901c6c589c92063279 100644
--- a/src/client/src/tscSQLParser.c
+++ b/src/client/src/tscSQLParser.c
@@ -47,11 +47,11 @@
#define TSWINDOW_IS_EQUAL(t1, t2) (((t1).skey == (t2).skey) && ((t1).ekey == (t2).ekey))
-// -1 is tbname column index, so here use the -2 as the initial value
+// -1 is the tbname column idx, so -2 is used here as the initial value
#define COLUMN_INDEX_INITIAL_VAL (-2)
#define COLUMN_INDEX_INITIALIZER \
{ COLUMN_INDEX_INITIAL_VAL, COLUMN_INDEX_INITIAL_VAL }
-#define COLUMN_INDEX_VALID(index) (((index).tableIndex >= 0) && ((index).columnIndex >= TSDB_MIN_VALID_COLUMN_INDEX))
+#define COLUMN_INDEX_VALID(idx) (((idx).tableIndex >= 0) && ((idx).columnIndex >= TSDB_MIN_VALID_COLUMN_INDEX))
#define TBNAME_LIST_SEP ","
typedef struct SColumnList { // todo refactor
@@ -335,21 +335,21 @@ static int32_t invalidOperationMsg(char* dstBuffer, const char* errMsg) {
}
static int convertTimestampStrToInt64(tVariant *pVar, int32_t precision) {
- int64_t time = 0;
+ int64_t t = 0;
stringProcess(pVar->pz, pVar->nLen);
char* seg = strnchr(pVar->pz, '-', pVar->nLen, false);
if (seg != NULL) {
- if (taosParseTime(pVar->pz, &time, pVar->nLen, precision, tsDaylight) != TSDB_CODE_SUCCESS) {
+ if (taosParseTime(pVar->pz, &t, pVar->nLen, precision, tsDaylight) != TSDB_CODE_SUCCESS) {
return -1;
}
} else {
- if (tVariantDump(pVar, (char*)&time, TSDB_DATA_TYPE_BIGINT, true)) {
+ if (tVariantDump(pVar, (char*)&t, TSDB_DATA_TYPE_BIGINT, true)) {
return -1;
}
}
tVariantDestroy(pVar);
- tVariantCreateFromBinary(pVar, (char*)&time, 0, TSDB_DATA_TYPE_BIGINT);
+ tVariantCreateFromBinary(pVar, (char*)&t, 0, TSDB_DATA_TYPE_BIGINT);
return 0;
}
static int setColumnFilterInfoForTimestamp(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tVariant* pVar) {
@@ -1155,8 +1155,8 @@ static int32_t addPrimaryTsColumnForTimeWindowQuery(SQueryInfo* pQueryInfo, SSql
tstrncpy(s.name, aAggs[TSDB_FUNC_TS].name, sizeof(s.name));
}
- SColumnIndex index = {tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX};
- tscAddFuncInSelectClause(pQueryInfo, 0, TSDB_FUNC_TS, &index, &s, TSDB_COL_NORMAL, 0);
+ SColumnIndex idx = {tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX};
+ tscAddFuncInSelectClause(pQueryInfo, 0, TSDB_FUNC_TS, &idx, &s, TSDB_COL_NORMAL, 0);
return TSDB_CODE_SUCCESS;
}
@@ -1303,17 +1303,17 @@ static int32_t validateStateWindowNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SS
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
- SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(col, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ SColumnIndex idx = COLUMN_INDEX_INITIALIZER;
+ if (getColumnIndexByName(col, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
- STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
+ STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex);
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
int32_t numOfCols = tscGetNumOfColumns(pTableMeta);
- if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
+ if (idx.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
- } else if (index.columnIndex >= numOfCols) {
+ } else if (idx.columnIndex >= numOfCols) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
@@ -1322,7 +1322,7 @@ static int32_t validateStateWindowNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SS
pGroupExpr->columnInfo = taosArrayInit(4, sizeof(SColIndex));
}
- SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, index.columnIndex);
+ SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, idx.columnIndex);
if (pSchema->type == TSDB_DATA_TYPE_TIMESTAMP || pSchema->type == TSDB_DATA_TYPE_FLOAT
|| pSchema->type == TSDB_DATA_TYPE_DOUBLE || pSchema->type == TSDB_DATA_TYPE_NCHAR
|| pSchema->type == TSDB_DATA_TYPE_BINARY) {
@@ -1345,8 +1345,8 @@ static int32_t validateStateWindowNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SS
}
}
- tscColumnListInsert(pQueryInfo->colList, index.columnIndex, pTableMeta->id.uid, pSchema);
- SColIndex colIndex = { .colIndex = index.columnIndex, .flag = TSDB_COL_NORMAL, .colId = pSchema->colId };
+ tscColumnListInsert(pQueryInfo->colList, idx.columnIndex, pTableMeta->id.uid, pSchema);
+ SColIndex colIndex = { .colIndex = idx.columnIndex, .flag = TSDB_COL_NORMAL, .colId = pSchema->colId };
taosArrayPush(pGroupExpr->columnInfo, &colIndex);
pQueryInfo->groupbyExpr.orderType = TSDB_ORDER_ASC;
pQueryInfo->stateWindow = true;
@@ -1386,11 +1386,11 @@ int32_t validateSessionNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode * pS
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
- SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(col, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ SColumnIndex idx = COLUMN_INDEX_INITIALIZER;
+ if (getColumnIndexByName(col, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
- if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
+ if (idx.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
@@ -1947,8 +1947,8 @@ static int32_t handleScalarTypeExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
- SColumnIndex index = {.tableIndex = tableIndex};
- SExprInfo* pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_SCALAR_EXPR, &index, pNode->resultType, pNode->resultBytes,
+ SColumnIndex idx = {.tableIndex = tableIndex};
+ SExprInfo* pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_SCALAR_EXPR, &idx, pNode->resultType, pNode->resultBytes,
getNewResColId(pCmd), 0, false);
// set the colId to the result column id
pExpr->base.colInfo.colId = pExpr->base.resColId;
@@ -2130,9 +2130,9 @@ static void addPrimaryTsColIntoResult(SQueryInfo* pQueryInfo, SSqlCmd* pCmd) {
SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, PRIMARYKEY_TIMESTAMP_COL_INDEX);
// add the timestamp column into the output columns
- SColumnIndex index = {0}; // primary timestamp column info
+ SColumnIndex idx = {0}; // primary timestamp column info
int32_t numOfCols = (int32_t)tscNumOfFields(pQueryInfo);
- tscAddFuncInSelectClause(pQueryInfo, numOfCols, TSDB_FUNC_PRJ, &index, pSchema, TSDB_COL_NORMAL, getNewResColId(pCmd));
+ tscAddFuncInSelectClause(pQueryInfo, numOfCols, TSDB_FUNC_PRJ, &idx, pSchema, TSDB_COL_NORMAL, getNewResColId(pCmd));
SInternalField* pSupInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, numOfCols);
pSupInfo->visible = false;
@@ -2394,16 +2394,16 @@ SExprInfo* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t colIndex, int32_t tab
SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, colIndex);
int16_t functionId = (int16_t)((colIndex >= numOfCols) ? TSDB_FUNC_TAGPRJ : TSDB_FUNC_PRJ);
- SColumnIndex index = {.tableIndex = tableIndex,};
+ SColumnIndex idx = {.tableIndex = tableIndex,};
if (functionId == TSDB_FUNC_TAGPRJ) {
- index.columnIndex = colIndex - tscGetNumOfColumns(pTableMeta);
- tscColumnListInsert(pTableMetaInfo->tagColList, index.columnIndex, pTableMeta->id.uid, pSchema);
+ idx.columnIndex = colIndex - tscGetNumOfColumns(pTableMeta);
+ tscColumnListInsert(pTableMetaInfo->tagColList, idx.columnIndex, pTableMeta->id.uid, pSchema);
} else {
- index.columnIndex = colIndex;
+ idx.columnIndex = colIndex;
}
- return tscExprAppend(pQueryInfo, functionId, &index, pSchema->type, pSchema->bytes, colId, 0,
+ return tscExprAppend(pQueryInfo, functionId, &idx, pSchema->type, pSchema->bytes, colId, 0,
(functionId == TSDB_FUNC_TAGPRJ));
}
@@ -2476,41 +2476,41 @@ int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t
if (tokenId == TK_ALL) { // project on all fields
TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_PROJECTION_QUERY);
- SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getTableIndexByName(&pItem->pNode->columnName, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ SColumnIndex idx = COLUMN_INDEX_INITIALIZER;
+ if (getTableIndexByName(&pItem->pNode->columnName, pQueryInfo, &idx) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
// all meters columns are required
- if (index.tableIndex == COLUMN_INDEX_INITIAL_VAL) { // all table columns are required.
+ if (idx.tableIndex == COLUMN_INDEX_INITIAL_VAL) { // all table columns are required.
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
- index.tableIndex = i;
- int32_t inc = doAddProjectionExprAndResultFields(pQueryInfo, &index, startPos, pCmd);
+ idx.tableIndex = i;
+ int32_t inc = doAddProjectionExprAndResultFields(pQueryInfo, &idx, startPos, pCmd);
startPos += inc;
}
} else {
- doAddProjectionExprAndResultFields(pQueryInfo, &index, startPos, pCmd);
+ doAddProjectionExprAndResultFields(pQueryInfo, &idx, startPos, pCmd);
}
// add the primary timestamp column even though it is not required by user
- STableMeta* pTableMeta = pQueryInfo->pTableMetaInfo[index.tableIndex]->pTableMeta;
+ STableMeta* pTableMeta = pQueryInfo->pTableMetaInfo[idx.tableIndex]->pTableMeta;
if (pTableMeta->tableType != TSDB_TEMP_TABLE) {
tscInsertPrimaryTsSourceColumn(pQueryInfo, pTableMeta->id.uid);
}
} else if (tokenId == TK_STRING || tokenId == TK_INTEGER || tokenId == TK_FLOAT || tokenId == TK_BOOL) { // simple column projection query
- SColumnIndex index = COLUMN_INDEX_INITIALIZER;
+ SColumnIndex idx = COLUMN_INDEX_INITIALIZER;
// user-specified constant value as a new result column
- index.columnIndex = (pQueryInfo->udColumnId--);
- index.tableIndex = 0;
+ idx.columnIndex = (pQueryInfo->udColumnId--);
+ idx.tableIndex = 0;
SSchema colSchema = tGetUserSpecifiedColumnSchema(&pItem->pNode->value, &pItem->pNode->exprToken, pItem->aliasName);
- SExprInfo* pExpr = tscAddFuncInSelectClause(pQueryInfo, startPos, TSDB_FUNC_PRJ, &index, &colSchema, TSDB_COL_UDC,
+ SExprInfo* pExpr = tscAddFuncInSelectClause(pQueryInfo, startPos, TSDB_FUNC_PRJ, &idx, &colSchema, TSDB_COL_UDC,
getNewResColId(pCmd));
tVariantAssign(&pExpr->base.param[pExpr->base.numOfParams++], &pItem->pNode->value);
}else if (tokenId == TK_ID || tokenId == TK_ARROW) {
- SColumnIndex index = COLUMN_INDEX_INITIALIZER;
+ SColumnIndex idx = COLUMN_INDEX_INITIALIZER;
SStrToken* pToken = NULL;
if (tokenId == TK_ARROW){
@@ -2530,35 +2530,35 @@ int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t
pToken = &pItem->pNode->columnName;
}
- if (getColumnIndexByName(pToken, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(pToken, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
//for tbname and other pseudo columns
- if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX || TSDB_COL_IS_TSWIN_COL(index.columnIndex)) {
+ if (idx.columnIndex == TSDB_TBNAME_COLUMN_INDEX || TSDB_COL_IS_TSWIN_COL(idx.columnIndex)) {
if (outerQuery) {
- STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
+ STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex);
int32_t numOfCols = tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
bool existed = false;
SSchema* pSchema = pTableMetaInfo->pTableMeta->schema;
for (int32_t i = 0; i < numOfCols; ++i) {
if ((strncasecmp(pSchema[i].name, TSQL_TBNAME_L, tListLen(pSchema[i].name)) == 0 &&
- index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) ||
+ idx.columnIndex == TSDB_TBNAME_COLUMN_INDEX) ||
(strncasecmp(pSchema[i].name, TSQL_TSWIN_START, tListLen(pSchema[i].name)) == 0 &&
- index.columnIndex == TSDB_TSWIN_START_COLUMN_INDEX) ||
+ idx.columnIndex == TSDB_TSWIN_START_COLUMN_INDEX) ||
(strncasecmp(pSchema[i].name, TSQL_TSWIN_STOP, tListLen(pSchema[i].name)) == 0 &&
- index.columnIndex == TSDB_TSWIN_STOP_COLUMN_INDEX) ||
+ idx.columnIndex == TSDB_TSWIN_STOP_COLUMN_INDEX) ||
(strncasecmp(pSchema[i].name, TSQL_TSWIN_DURATION, tListLen(pSchema[i].name)) == 0 &&
- index.columnIndex == TSDB_TSWIN_DURATION_COLUMN_INDEX) ||
+ idx.columnIndex == TSDB_TSWIN_DURATION_COLUMN_INDEX) ||
(strncasecmp(pSchema[i].name, TSQL_QUERY_START, tListLen(pSchema[i].name)) == 0 &&
- index.columnIndex == TSDB_QUERY_START_COLUMN_INDEX) ||
+ idx.columnIndex == TSDB_QUERY_START_COLUMN_INDEX) ||
(strncasecmp(pSchema[i].name, TSQL_QUERY_STOP, tListLen(pSchema[i].name)) == 0 &&
- index.columnIndex == TSDB_QUERY_STOP_COLUMN_INDEX) ||
+ idx.columnIndex == TSDB_QUERY_STOP_COLUMN_INDEX) ||
(strncasecmp(pSchema[i].name, TSQL_QUERY_DURATION, tListLen(pSchema[i].name)) == 0 &&
- index.columnIndex == TSDB_QUERY_DURATION_COLUMN_INDEX)) {
+ idx.columnIndex == TSDB_QUERY_DURATION_COLUMN_INDEX)) {
existed = true;
- index.columnIndex = i;
+ idx.columnIndex = i;
break;
}
}
@@ -2567,47 +2567,47 @@ int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
- SSchema colSchema = pSchema[index.columnIndex];
+ SSchema colSchema = pSchema[idx.columnIndex];
char name[TSDB_COL_NAME_LEN] = {0};
getColumnName(pItem, name, colSchema.name, sizeof(colSchema.name) - 1);
tstrncpy(colSchema.name, name, TSDB_COL_NAME_LEN);
- /*SExprInfo* pExpr = */ tscAddFuncInSelectClause(pQueryInfo, startPos, TSDB_FUNC_PRJ, &index, &colSchema,
+ /*SExprInfo* pExpr = */ tscAddFuncInSelectClause(pQueryInfo, startPos, TSDB_FUNC_PRJ, &idx, &colSchema,
TSDB_COL_NORMAL, getNewResColId(pCmd));
} else {
SSchema colSchema;
int16_t functionId, colType;
- if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
+ if (idx.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
colSchema = *tGetTbnameColumnSchema();
functionId = TSDB_FUNC_TAGPRJ;
colType = TSDB_COL_TAG;
} else {
- if (!timeWindowQuery && (index.columnIndex == TSDB_TSWIN_START_COLUMN_INDEX ||
- index.columnIndex == TSDB_TSWIN_STOP_COLUMN_INDEX ||
- index.columnIndex == TSDB_TSWIN_DURATION_COLUMN_INDEX)) {
+ if (!timeWindowQuery && (idx.columnIndex == TSDB_TSWIN_START_COLUMN_INDEX ||
+ idx.columnIndex == TSDB_TSWIN_STOP_COLUMN_INDEX ||
+ idx.columnIndex == TSDB_TSWIN_DURATION_COLUMN_INDEX)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7);
}
- colSchema = *tGetTimeWindowColumnSchema(index.columnIndex);
- functionId = getTimeWindowFunctionID(index.columnIndex);
+ colSchema = *tGetTimeWindowColumnSchema(idx.columnIndex);
+ functionId = getTimeWindowFunctionID(idx.columnIndex);
colType = TSDB_COL_NORMAL;
}
char name[TSDB_COL_NAME_LEN] = {0};
getColumnName(pItem, name, colSchema.name, sizeof(colSchema.name) - 1);
tstrncpy(colSchema.name, name, TSDB_COL_NAME_LEN);
- /*SExprInfo* pExpr = */ tscAddFuncInSelectClause(pQueryInfo, startPos, functionId, &index, &colSchema,
+ /*SExprInfo* pExpr = */ tscAddFuncInSelectClause(pQueryInfo, startPos, functionId, &idx, &colSchema,
colType, getNewResColId(pCmd));
}
pQueryInfo->type |= TSDB_QUERY_TYPE_PROJECTION_QUERY;
} else {
- STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
+ STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex);
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
- if (index.columnIndex >= tscGetNumOfColumns(pTableMeta) && UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) {
+ if (idx.columnIndex >= tscGetNumOfColumns(pTableMeta) && UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
- SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, index.columnIndex);
+ SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, idx.columnIndex);
if (tokenId == TK_ARROW && pSchema->type != TSDB_DATA_TYPE_JSON) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
@@ -2615,12 +2615,12 @@ int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
- addProjectQueryCol(pQueryInfo, startPos, &index, pItem, getNewResColId(pCmd));
+ addProjectQueryCol(pQueryInfo, startPos, &idx, pItem, getNewResColId(pCmd));
pQueryInfo->type |= TSDB_QUERY_TYPE_PROJECTION_QUERY;
}
// add the primary timestamp column even though it is not required by user
- STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
+ STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex);
if (!UTIL_TABLE_IS_TMP_TABLE(pTableMetaInfo)) {
tscInsertPrimaryTsSourceColumn(pQueryInfo, pTableMetaInfo->pTableMeta->id.uid);
}
@@ -2690,24 +2690,24 @@ void setResultColName(char* name, bool finalResult, tSqlExprItem* pItem, int32_t
if (pItem->aliasName != NULL) {
tstrncpy(name, pItem->aliasName, TSDB_COL_NAME_LEN);
} else {
- char uname[TSDB_COL_NAME_LEN] = {0};
+ char colName[TSDB_COL_NAME_LEN] = {0};
int32_t len = MIN(pToken->n + 1, TSDB_COL_NAME_LEN);
- tstrncpy(uname, pToken->z, len);
+ tstrncpy(colName, pToken->z, len);
if (finalResult && tsKeepOriginalColumnName) { // keep the original column name
- tstrncpy(name, uname, TSDB_COL_NAME_LEN);
+ tstrncpy(name, colName, TSDB_COL_NAME_LEN);
} else if (multiCols) {
if (!TSDB_FUNC_IS_SCALAR(functionId)) {
int32_t size = TSDB_COL_NAME_LEN + tListLen(aAggs[functionId].name) + 2 + 1;
char tmp[TSDB_COL_NAME_LEN + tListLen(aAggs[functionId].name) + 2 + 1] = {0};
- snprintf(tmp, size, "%s(%s)", aAggs[functionId].name, uname);
+ snprintf(tmp, size, "%s(%s)", aAggs[functionId].name, colName);
tstrncpy(name, tmp, TSDB_COL_NAME_LEN);
} else {
- int32_t index = TSDB_FUNC_SCALAR_INDEX(functionId);
- int32_t size = TSDB_COL_NAME_LEN + tListLen(aScalarFunctions[index].name) + 2 + 1;
- char tmp[TSDB_COL_NAME_LEN + tListLen(aScalarFunctions[index].name) + 2 + 1] = {0};
- snprintf(tmp, size, "%s(%s)", aScalarFunctions[index].name, uname);
+ int32_t idx = TSDB_FUNC_SCALAR_INDEX(functionId);
+ int32_t size = TSDB_COL_NAME_LEN + tListLen(aScalarFunctions[idx].name) + 2 + 1;
+ char tmp[TSDB_COL_NAME_LEN + tListLen(aScalarFunctions[idx].name) + 2 + 1] = {0};
+ snprintf(tmp, size, "%s(%s)", aScalarFunctions[idx].name, colName);
tstrncpy(name, tmp, TSDB_COL_NAME_LEN);
}
@@ -2793,7 +2793,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
}
SExprInfo* pExpr = NULL;
- SColumnIndex index = COLUMN_INDEX_INITIALIZER;
+ SColumnIndex idx = COLUMN_INDEX_INITIALIZER;
if (pItem->pNode->Expr.paramList != NULL) {
tSqlExprItem* pParamElem = taosArrayGet(pItem->pNode->Expr.paramList, 0);
@@ -2808,47 +2808,47 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
// check if the table name is valid or not
SStrToken tmpToken = pParamElem->pNode->columnName;
- if (getTableIndexByName(&tmpToken, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ if (getTableIndexByName(&tmpToken, pQueryInfo, &idx) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
- index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
+ idx = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
int32_t size = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes;
- pExpr = tscExprAppend(pQueryInfo, functionId, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pCmd), size,
+ pExpr = tscExprAppend(pQueryInfo, functionId, &idx, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pCmd), size,
false);
} else {
// count the number of table created according to the super table
- if (getColumnIndexByName(pToken, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(pToken, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
- pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
+ pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex);
// count tag is equalled to count(tbname)
bool isTag = false;
- if (index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta) ||
- index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
- index.columnIndex = TSDB_TBNAME_COLUMN_INDEX;
+ if (idx.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta) ||
+ idx.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
+ idx.columnIndex = TSDB_TBNAME_COLUMN_INDEX;
isTag = true;
}
int32_t size = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes;
- pExpr = tscExprAppend(pQueryInfo, functionId, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pCmd), size,
+ pExpr = tscExprAppend(pQueryInfo, functionId, &idx, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pCmd), size,
isTag);
}
} else { // count(*) is equalled to count(primary_timestamp_key)
- index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
+ idx = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
int32_t size = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes;
- pExpr = tscExprAppend(pQueryInfo, functionId, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pCmd), size,
+ pExpr = tscExprAppend(pQueryInfo, functionId, &idx, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pCmd), size,
false);
}
- pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
+ pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex);
memset(pExpr->base.aliasName, 0, tListLen(pExpr->base.aliasName));
getColumnName(pItem, pExpr->base.aliasName, pExpr->base.token, sizeof(pExpr->base.aliasName) - 1);
- SColumnList list = createColumnList(1, index.tableIndex, index.columnIndex);
+ SColumnList list = createColumnList(1, idx.tableIndex, idx.columnIndex);
if (finalResult) {
int32_t numOfOutput = tscNumOfFields(pQueryInfo);
insertResultField(pQueryInfo, numOfOutput, &list, sizeof(int64_t), TSDB_DATA_TYPE_BIGINT, pExpr->base.aliasName,
@@ -2862,7 +2862,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
}
// the time stamp may be always needed
- if (index.tableIndex < tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) {
+ if (idx.tableIndex < tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) {
tscInsertPrimaryTsSourceColumn(pQueryInfo, pTableMetaInfo->pTableMeta->id.uid);
}
@@ -2876,6 +2876,8 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
case TSDB_FUNC_TWA:
case TSDB_FUNC_MIN:
case TSDB_FUNC_MAX:
+ case TSDB_FUNC_MIN_ROW:
+ case TSDB_FUNC_MAX_ROW:
case TSDB_FUNC_DIFF:
case TSDB_FUNC_DERIVATIVE:
case TSDB_FUNC_CSUM:
@@ -2908,18 +2910,18 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
- SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if ((getColumnIndexByName(&pParamElem->pNode->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) !=
+ SColumnIndex idx = COLUMN_INDEX_INITIALIZER;
+ if ((getColumnIndexByName(&pParamElem->pNode->columnName, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) !=
TSDB_CODE_SUCCESS)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
- pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
- SSchema* pColumnSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex);
+ pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex);
+ SSchema* pColumnSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, idx.columnIndex);
// elapsed only can be applied to primary key
if (functionId == TSDB_FUNC_ELAPSED) {
- if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX ||
+ if (idx.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX ||
pColumnSchema->colId != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), "elapsed only can be applied to primary key");
}
@@ -2945,13 +2947,13 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
STableComInfo info = tscGetTableInfo(pTableMetaInfo->pTableMeta);
// functions can not be applied to tags
- if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX ||
- (index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta))) {
+ if (idx.columnIndex == TSDB_TBNAME_COLUMN_INDEX ||
+ (idx.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta))) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
// 2. check if sql function can be applied on this column data type
- SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex);
+ SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, idx.columnIndex);
if (functionId == TSDB_FUNC_MODE && pColumnSchema->colId == PRIMARYKEY_TIMESTAMP_COL_INDEX &&
pColumnSchema->type == TSDB_DATA_TYPE_TIMESTAMP){
@@ -2962,6 +2964,8 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
} else if (IS_UNSIGNED_NUMERIC_TYPE(pSchema->type) &&
(functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg9);
+ } else if (!IS_NUMERIC_TYPE(pSchema->type) && (functionId == TSDB_FUNC_MIN_ROW || functionId == TSDB_FUNC_MAX_ROW)) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
int16_t resultType = 0;
@@ -2975,7 +2979,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
// set the first column ts for diff query
if (functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE || functionId == TSDB_FUNC_CSUM) {
- SColumnIndex indexTS = {.tableIndex = index.tableIndex, .columnIndex = 0};
+ SColumnIndex indexTS = {.tableIndex = idx.tableIndex, .columnIndex = 0};
SExprInfo* pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &indexTS, TSDB_DATA_TYPE_TIMESTAMP,
TSDB_KEYSIZE, 0, TSDB_KEYSIZE, false);
tstrncpy(pExpr->base.aliasName, aAggs[TSDB_FUNC_TS_DUMMY].name, sizeof(pExpr->base.aliasName));
@@ -2986,7 +2990,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
}
if (functionId == TSDB_FUNC_STATE_COUNT || functionId == TSDB_FUNC_STATE_DURATION) {
- SColumnIndex indexTS = {.tableIndex = index.tableIndex, .columnIndex = 0};
+ SColumnIndex indexTS = {.tableIndex = idx.tableIndex, .columnIndex = 0};
SExprInfo* pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_PRJ, &indexTS, TSDB_DATA_TYPE_TIMESTAMP,
TSDB_KEYSIZE, 0, TSDB_KEYSIZE, false);
tstrncpy(pExpr->base.aliasName, aAggs[TSDB_FUNC_TS_DUMMY].name, sizeof(pExpr->base.aliasName));
@@ -2995,15 +2999,15 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
insertResultField(pQueryInfo, colIndex, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP,
aAggs[TSDB_FUNC_TS].name, pExpr);
- pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_PRJ, &index, pSchema->type,
+ pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_PRJ, &idx, pSchema->type,
pSchema->bytes, getNewResColId(pCmd), 0, false);
tstrncpy(pExpr->base.aliasName, pParamElem->pNode->columnName.z, pParamElem->pNode->columnName.n+1);
- ids = createColumnList(1, index.tableIndex, index.columnIndex);
+ ids = createColumnList(1, idx.tableIndex, idx.columnIndex);
insertResultField(pQueryInfo, colIndex + 1, &ids, pExpr->base.resBytes, (int32_t)pExpr->base.resType,
pExpr->base.aliasName, pExpr);
}
- SExprInfo* pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd),
+ SExprInfo* pExpr = tscExprAppend(pQueryInfo, functionId, &idx, resultType, resultSize, getNewResColId(pCmd),
intermediateResSize, false);
if (functionId == TSDB_FUNC_LEASTSQR) { // set the leastsquares parameters
@@ -3118,7 +3122,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
}
}
- SColumnList ids = createColumnList(1, index.tableIndex, index.columnIndex);
+ SColumnList ids = createColumnList(1, idx.tableIndex, idx.columnIndex);
memset(pExpr->base.aliasName, 0, tListLen(pExpr->base.aliasName));
getColumnName(pItem, pExpr->base.aliasName, pExpr->base.token, sizeof(pExpr->base.aliasName) - 1);
@@ -3168,55 +3172,55 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
- SColumnIndex index = COLUMN_INDEX_INITIALIZER;
+ SColumnIndex idx = COLUMN_INDEX_INITIALIZER;
if (pParamElem->pNode->tokenId == TK_ALL) { // select table.*
SStrToken tmpToken = pParamElem->pNode->columnName;
- if (getTableIndexByName(&tmpToken, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ if (getTableIndexByName(&tmpToken, pQueryInfo, &idx) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
- pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
+ pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex);
SSchema* pSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta);
char name[TSDB_COL_NAME_LEN] = {0};
for (int32_t j = 0; j < tscGetNumOfColumns(pTableMetaInfo->pTableMeta); ++j) {
- index.columnIndex = j;
+ idx.columnIndex = j;
SStrToken t = {.z = pSchema[j].name, .n = (uint32_t)strnlen(pSchema[j].name, TSDB_COL_NAME_LEN)};
setResultColName(name, finalResult, pItem, cvtFunc.originFuncId, &t, true);
- if (setExprInfoForFunctions(pCmd, pQueryInfo, &pSchema[j], cvtFunc, name, colIndex++, &index, finalResult,
+ if (setExprInfoForFunctions(pCmd, pQueryInfo, &pSchema[j], cvtFunc, name, colIndex++, &idx, finalResult,
pUdfInfo) != 0) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
}
} else {
- if (getColumnIndexByName(&pParamElem->pNode->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) !=
+ if (getColumnIndexByName(&pParamElem->pNode->columnName, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) !=
TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
- pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
+ pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex);
if (pParamElem->pNode->columnName.z == NULL) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
// functions can not be applied to tags
- if ((index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) || (index.columnIndex < 0)) {
+ if ((idx.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) || (idx.columnIndex < 0)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
char name[TSDB_COL_NAME_LEN] = {0};
- SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex);
+ SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, idx.columnIndex);
bool multiColOutput = taosArrayGetSize(pItem->pNode->Expr.paramList) > 1;
setResultColName(name, finalResult, pItem, cvtFunc.originFuncId, &pParamElem->pNode->columnName,
multiColOutput);
- if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, cvtFunc, name, colIndex++, &index, finalResult,
+ if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, cvtFunc, name, colIndex++, &idx, finalResult,
pUdfInfo) != 0) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
@@ -3236,13 +3240,13 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
SSchema* pSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta);
for (int32_t i = 0; i < tscGetNumOfColumns(pTableMetaInfo->pTableMeta); ++i) {
- SColumnIndex index = {.tableIndex = j, .columnIndex = i};
+ SColumnIndex idx = {.tableIndex = j, .columnIndex = i};
char name[TSDB_COL_NAME_LEN] = {0};
SStrToken t = {.z = pSchema[i].name, .n = (uint32_t)strnlen(pSchema[i].name, TSDB_COL_NAME_LEN)};
setResultColName(name, finalResult, pItem, cvtFunc.originFuncId, &t, true);
- if (setExprInfoForFunctions(pCmd, pQueryInfo, &pSchema[index.columnIndex], cvtFunc, name, colIndex, &index,
+ if (setExprInfoForFunctions(pCmd, pQueryInfo, &pSchema[idx.columnIndex], cvtFunc, name, colIndex, &idx,
finalResult, pUdfInfo) != 0) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
@@ -3284,26 +3288,26 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
- SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(&pParamElem->pNode->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) !=
+ SColumnIndex idx = COLUMN_INDEX_INITIALIZER;
+ if (getColumnIndexByName(&pParamElem->pNode->columnName, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) !=
TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
- if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
+ if (idx.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
- pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
- SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex);
+ pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex);
+ SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, idx.columnIndex);
- if (index.columnIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX && pSchema->type == TSDB_DATA_TYPE_TIMESTAMP &&
+ if (idx.columnIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX && pSchema->type == TSDB_DATA_TYPE_TIMESTAMP &&
(functionId == TSDB_FUNC_UNIQUE || functionId == TSDB_FUNC_TAIL)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg29);
}
// functions can not be applied to tags
- if (index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) {
+ if (idx.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
@@ -3352,7 +3356,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
tscInsertPrimaryTsSourceColumn(pQueryInfo, pTableMetaInfo->pTableMeta->id.uid);
colIndex += 1; // the first column is ts
- pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd),
+ pExpr = tscExprAppend(pQueryInfo, functionId, &idx, resultType, resultSize, getNewResColId(pCmd),
interResult, false);
tscExprAddParams(&pExpr->base, val, TSDB_DATA_TYPE_DOUBLE, sizeof(double));
@@ -3398,12 +3402,12 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
// todo REFACTOR
// set the first column ts for top/bottom query
int32_t tsFuncId = (functionId == TSDB_FUNC_MAVG) ? TSDB_FUNC_TS_DUMMY : TSDB_FUNC_TS;
- SColumnIndex index1 = {index.tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX};
+ SColumnIndex index1 = {idx.tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX};
pExpr = tscExprAppend(pQueryInfo, tsFuncId, &index1, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, 0, 0, false);
tstrncpy(pExpr->base.aliasName, aAggs[tsFuncId].name, sizeof(pExpr->base.aliasName));
const int32_t TS_COLUMN_INDEX = PRIMARYKEY_TIMESTAMP_COL_INDEX;
- SColumnList ids = createColumnList(1, index.tableIndex, TS_COLUMN_INDEX);
+ SColumnList ids = createColumnList(1, idx.tableIndex, TS_COLUMN_INDEX);
insertResultField(pQueryInfo, colIndex, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[tsFuncId].name,
pExpr);
@@ -3411,7 +3415,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
getResultDataInfo(pSchema->type, pSchema->bytes, functionId, (int32_t)numRowsSelected, &resultType,
&resultSize, &interResult, 0, false, pUdfInfo);
- pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd),
+ pExpr = tscExprAppend(pQueryInfo, functionId, &idx, resultType, resultSize, getNewResColId(pCmd),
interResult, false);
tscExprAddParams(&pExpr->base, val, TSDB_DATA_TYPE_BIGINT, sizeof(int64_t));
} else {
@@ -3427,19 +3431,19 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
}
// todo REFACTOR
// set the first column ts for top/bottom query
- SColumnIndex index1 = {index.tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX};
+ SColumnIndex index1 = {idx.tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX};
pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS, &index1, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, 0, 0, false);
tstrncpy(pExpr->base.aliasName, aAggs[TSDB_FUNC_TS].name, sizeof(pExpr->base.aliasName));
const int32_t TS_COLUMN_INDEX = PRIMARYKEY_TIMESTAMP_COL_INDEX;
- SColumnList ids = createColumnList(1, index.tableIndex, TS_COLUMN_INDEX);
+ SColumnList ids = createColumnList(1, idx.tableIndex, TS_COLUMN_INDEX);
insertResultField(pQueryInfo, colIndex, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP,
aAggs[TSDB_FUNC_TS].name, pExpr);
colIndex += 1; // the first column is ts
getResultDataInfo(pSchema->type, pSchema->bytes, functionId, (int32_t)numRowsSelected, &resultType,
&resultSize, &interResult, 0, false, pUdfInfo);
- pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd),
+ pExpr = tscExprAppend(pQueryInfo, functionId, &idx, resultType, resultSize, getNewResColId(pCmd),
interResult, false);
if (functionId == TSDB_FUNC_TAIL){
int64_t offset = 0;
@@ -3466,7 +3470,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
getColumnName(pItem, pExpr->base.aliasName, pExpr->base.token, sizeof(pExpr->base.aliasName) - 1);
// todo refactor: tscColumnListInsert part
- SColumnList ids = createColumnList(1, index.tableIndex, index.columnIndex);
+ SColumnList ids = createColumnList(1, idx.tableIndex, idx.columnIndex);
if (finalResult) {
insertResultField(pQueryInfo, colIndex, &ids, resultSize, (int8_t)resultType, pExpr->base.aliasName, pExpr);
@@ -3492,46 +3496,46 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
tSqlExprItem* pParamItem = taosArrayGet(pItem->pNode->Expr.paramList, 0);
tSqlExpr* pParam = pParamItem->pNode;
- SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(&pParam->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) !=
+ SColumnIndex idx = COLUMN_INDEX_INITIALIZER;
+ if (getColumnIndexByName(&pParam->columnName, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) !=
TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
- pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
+ pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex);
SSchema* pSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta);
// functions can not be applied to normal columns
int32_t numOfCols = tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
- if (index.columnIndex < numOfCols && index.columnIndex != TSDB_TBNAME_COLUMN_INDEX) {
+ if (idx.columnIndex < numOfCols && idx.columnIndex != TSDB_TBNAME_COLUMN_INDEX) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
- if (index.columnIndex > 0) {
- index.columnIndex -= numOfCols;
+ if (idx.columnIndex > 0) {
+ idx.columnIndex -= numOfCols;
}
// 2. valid the column type
int16_t colType = 0;
- if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
+ if (idx.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
colType = TSDB_DATA_TYPE_BINARY;
} else {
- colType = pSchema[index.columnIndex].type;
+ colType = pSchema[idx.columnIndex].type;
}
if (colType == TSDB_DATA_TYPE_BOOL) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
- tscColumnListInsert(pTableMetaInfo->tagColList, index.columnIndex, pTableMetaInfo->pTableMeta->id.uid,
- &pSchema[index.columnIndex]);
+ tscColumnListInsert(pTableMetaInfo->tagColList, idx.columnIndex, pTableMetaInfo->pTableMeta->id.uid,
+ &pSchema[idx.columnIndex]);
SSchema* pTagSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta);
SSchema s = {0};
- if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
+ if (idx.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
s = *tGetTbnameColumnSchema();
} else {
- s = pTagSchema[index.columnIndex];
+ s = pTagSchema[idx.columnIndex];
}
int32_t bytes = 0;
@@ -3545,7 +3549,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
s.bytes = bytes;
TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_TAG_FILTER_QUERY);
- tscAddFuncInSelectClause(pQueryInfo, 0, TSDB_FUNC_TID_TAG, &index, &s, TSDB_COL_TAG, getNewResColId(pCmd));
+ tscAddFuncInSelectClause(pQueryInfo, 0, TSDB_FUNC_TID_TAG, &idx, &s, TSDB_COL_TAG, getNewResColId(pCmd));
return TSDB_CODE_SUCCESS;
}
@@ -3556,11 +3560,11 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
- SColumnIndex index = {
+ SColumnIndex idx = {
.tableIndex = 0,
.columnIndex = 0,
};
- pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
+ pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex);
int32_t inter = 0;
int16_t resType = 0;
@@ -3571,10 +3575,10 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
SSchema s = {.name = "block_dist", .type = TSDB_DATA_TYPE_BINARY, .bytes = bytes};
SExprInfo* pExpr =
- tscExprInsert(pQueryInfo, 0, TSDB_FUNC_BLKINFO, &index, resType, bytes, getNewResColId(pCmd), bytes, 0);
+ tscExprInsert(pQueryInfo, 0, TSDB_FUNC_BLKINFO, &idx, resType, bytes, getNewResColId(pCmd), bytes, 0);
tstrncpy(pExpr->base.aliasName, s.name, sizeof(pExpr->base.aliasName));
- SColumnList ids = createColumnList(1, index.tableIndex, index.columnIndex);
+ SColumnList ids = createColumnList(1, idx.tableIndex, idx.columnIndex);
insertResultField(pQueryInfo, 0, &ids, bytes, s.type, s.name, pExpr);
pExpr->base.numOfParams = 1;
@@ -3595,18 +3599,18 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
- SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(&pParamElem->pNode->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) !=
+ SColumnIndex idx = COLUMN_INDEX_INITIALIZER;
+ if (getColumnIndexByName(&pParamElem->pNode->columnName, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) !=
TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
- if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
+ if (idx.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
- pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
- SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex);
+ pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex);
+ SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, idx.columnIndex);
if (!IS_NUMERIC_TYPE(pSchema->type)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
@@ -3768,7 +3772,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
getResultDataInfo(pSchema->type, pSchema->bytes, functionId, counter, &resultType, &resultSize, &interResult, 0,
false, pUdfInfo);
SExprInfo* pExpr = NULL;
- pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd), interResult,
+ pExpr = tscExprAppend(pQueryInfo, functionId, &idx, resultType, resultSize, getNewResColId(pCmd), interResult,
false);
numOutput = numBins - 1;
tscExprAddParams(&pExpr->base, (char*)&numOutput, TSDB_DATA_TYPE_INT, sizeof(int32_t));
@@ -3796,7 +3800,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
memset(pExpr->base.aliasName, 0, tListLen(pExpr->base.aliasName));
getColumnName(pItem, pExpr->base.aliasName, pExpr->base.token, sizeof(pExpr->base.aliasName) - 1);
// todo refactor: tscColumnListInsert part
- SColumnList ids = createColumnList(1, index.tableIndex, index.columnIndex);
+ SColumnList ids = createColumnList(1, idx.tableIndex, idx.columnIndex);
if (finalResult) {
insertResultField(pQueryInfo, colIndex, &ids, resultSize, (int8_t)resultType, pExpr->base.aliasName, pExpr);
@@ -3826,20 +3830,20 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
- SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(&pParamElem->pNode->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) !=
+ SColumnIndex idx = COLUMN_INDEX_INITIALIZER;
+ if (getColumnIndexByName(&pParamElem->pNode->columnName, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) !=
TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
- if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
+ if (idx.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
- pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
+ pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex);
// functions can not be applied to tags
- if (index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) {
+ if (idx.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
@@ -3849,21 +3853,21 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
getResultDataInfo(TSDB_DATA_TYPE_INT, 4, functionId, 0, &resType, &bytes, &inter, 0, false, pUdfInfo);
SExprInfo* pExpr =
- tscExprAppend(pQueryInfo, functionId, &index, resType, bytes, getNewResColId(pCmd), inter, false);
+ tscExprAppend(pQueryInfo, functionId, &idx, resType, bytes, getNewResColId(pCmd), inter, false);
memset(pExpr->base.aliasName, 0, tListLen(pExpr->base.aliasName));
getColumnName(pItem, pExpr->base.aliasName, pExpr->base.token, sizeof(pExpr->base.aliasName) - 1);
- SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex);
+ SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, idx.columnIndex);
uint64_t uid = pTableMetaInfo->pTableMeta->id.uid;
- SColumnList ids = createColumnList(1, index.tableIndex, index.columnIndex);
+ SColumnList ids = createColumnList(1, idx.tableIndex, idx.columnIndex);
if (finalResult) {
insertResultField(pQueryInfo, colIndex, &ids, pUdfInfo->resBytes, pUdfInfo->resType, pExpr->base.aliasName,
pExpr);
} else {
for (int32_t i = 0; i < ids.num; ++i) {
- tscColumnListInsert(pQueryInfo->colList, index.columnIndex, uid, pSchema);
+ tscColumnListInsert(pQueryInfo->colList, idx.columnIndex, uid, pSchema);
}
}
tscInsertPrimaryTsSourceColumn(pQueryInfo, pTableMetaInfo->pTableMeta->id.uid);
@@ -3880,9 +3884,9 @@ static SColumnList createColumnList(int32_t num, int16_t tableIndex, int32_t col
SColumnList columnList = {0};
columnList.num = num;
- int32_t index = num - 1;
- columnList.ids[index].tableIndex = tableIndex;
- columnList.ids[index].columnIndex = columnIndex;
+ int32_t idx = num - 1;
+ columnList.ids[idx].tableIndex = tableIndex;
+ columnList.ids[idx].columnIndex = columnIndex;
return columnList;
}
@@ -3936,8 +3940,8 @@ static bool isTimeWindowToken(SStrToken* token, int16_t *columnIndex) {
}
}
-static int16_t doGetColumnIndex(SQueryInfo* pQueryInfo, int32_t index, SStrToken* pToken) {
- STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, index)->pTableMeta;
+static int16_t doGetColumnIndex(SQueryInfo* pQueryInfo, int32_t idx, SStrToken* pToken) {
+ STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, idx)->pTableMeta;
int32_t numOfCols = tscGetNumOfColumns(pTableMeta) + tscGetNumOfTags(pTableMeta);
SSchema* pSchema = tscGetTableSchema(pTableMeta);
@@ -3968,10 +3972,6 @@ int32_t doGetColumnIndexByName(SStrToken* pToken, SQueryInfo* pQueryInfo, SColum
const char* msg0 = "ambiguous column name";
const char* msg1 = "invalid column name";
- if (pToken->n == 0) {
- return TSDB_CODE_TSC_INVALID_OPERATION;
- }
-
int16_t tsWinColumnIndex;
if (isTablenameToken(pToken)) {
pIndex->columnIndex = TSDB_TBNAME_COLUMN_INDEX;
@@ -3981,7 +3981,7 @@ int32_t doGetColumnIndexByName(SStrToken* pToken, SQueryInfo* pQueryInfo, SColum
} else if (isTimeWindowToken(pToken, &tsWinColumnIndex)) {
pIndex->columnIndex = tsWinColumnIndex;
} else {
- // not specify the table name, try to locate the table index by column name
+ // table name not specified, try to locate the table idx by column name
if (pIndex->tableIndex == COLUMN_INDEX_INITIAL_VAL) {
for (int16_t i = 0; i < pQueryInfo->numOfTables; ++i) {
int16_t colIndex = doGetColumnIndex(pQueryInfo, i, pToken);
@@ -3995,7 +3995,7 @@ int32_t doGetColumnIndexByName(SStrToken* pToken, SQueryInfo* pQueryInfo, SColum
}
}
}
- } else { // table index is valid, get the column index
+ } else { // table idx is valid, get the column idx
int16_t colIndex = doGetColumnIndex(pQueryInfo, pIndex->tableIndex, pToken);
if (colIndex != COLUMN_INDEX_INITIAL_VAL) {
pIndex->columnIndex = colIndex;
@@ -4057,6 +4057,12 @@ int32_t getTableIndexByName(SStrToken* pToken, SQueryInfo* pQueryInfo, SColumnIn
}
int32_t getColumnIndexByName(const SStrToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex, char* msg) {
+ const char* msg0 = "invalid column name";
+
+ if (pToken->n == 0) {
+ return invalidOperationMsg(msg, msg0);
+ }
+
if (pQueryInfo->pTableMetaInfo == NULL || pQueryInfo->numOfTables == 0) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
@@ -4535,24 +4541,24 @@ int32_t validateGroupbyNode(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd
}
}
- SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(&token, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ SColumnIndex idx = COLUMN_INDEX_INITIALIZER;
+ if (getColumnIndexByName(&token, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
if (tableIndex == COLUMN_INDEX_INITIAL_VAL) {
- tableIndex = index.tableIndex;
- } else if (tableIndex != index.tableIndex) {
+ tableIndex = idx.tableIndex;
+ } else if (tableIndex != idx.tableIndex) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
- pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
+ pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex);
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
- if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
+ if (idx.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
pSchema = tGetTbnameColumnSchema();
} else {
- pSchema = tscGetTableColumnSchema(pTableMeta, index.columnIndex);
+ pSchema = tscGetTableColumnSchema(pTableMeta, idx.columnIndex);
}
if (pSchema->type == TSDB_DATA_TYPE_JSON && !pItem->isJsonExp){
@@ -4563,15 +4569,15 @@ int32_t validateGroupbyNode(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd
}
int32_t numOfCols = tscGetNumOfColumns(pTableMeta);
- bool groupTag = (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX || index.columnIndex >= numOfCols);
+ bool groupTag = (idx.columnIndex == TSDB_TBNAME_COLUMN_INDEX || idx.columnIndex >= numOfCols);
if (groupTag) {
if (!UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
- int32_t relIndex = index.columnIndex;
- if (index.columnIndex != TSDB_TBNAME_COLUMN_INDEX) {
+ int32_t relIndex = idx.columnIndex;
+ if (idx.columnIndex != TSDB_TBNAME_COLUMN_INDEX) {
relIndex -= numOfCols;
}
@@ -4587,17 +4593,17 @@ int32_t validateGroupbyNode(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd
taosArrayPush(pGroupExpr->columnInfo, &colIndex);
- index.columnIndex = relIndex;
- tscColumnListInsert(pTableMetaInfo->tagColList, index.columnIndex, pTableMeta->id.uid, pSchema);
+ idx.columnIndex = relIndex;
+ tscColumnListInsert(pTableMetaInfo->tagColList, idx.columnIndex, pTableMeta->id.uid, pSchema);
} else {
// check if the column type is valid, here only support the bool/tinyint/smallint/bigint group by
if (pSchema->type == TSDB_DATA_TYPE_FLOAT || pSchema->type == TSDB_DATA_TYPE_DOUBLE) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
- tscColumnListInsert(pQueryInfo->colList, index.columnIndex, pTableMeta->id.uid, pSchema);
+ tscColumnListInsert(pQueryInfo->colList, idx.columnIndex, pTableMeta->id.uid, pSchema);
- SColIndex colIndex = { .colIndex = index.columnIndex, .flag = TSDB_COL_NORMAL, .colId = pSchema->colId };
+ SColIndex colIndex = { .colIndex = idx.columnIndex, .flag = TSDB_COL_NORMAL, .colId = pSchema->colId };
strncpy(colIndex.name, pSchema->name, tListLen(colIndex.name));
taosArrayPush(pGroupExpr->columnInfo, &colIndex);
@@ -4929,9 +4935,9 @@ static int32_t checkColumnQueryCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t
return checkColumnQueryCondInfo(pCmd, pQueryInfo, pExpr->pRight, pExpr->tokenId);
} else { // handle leaf node
- SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- addAllColumn(pCmd, pQueryInfo, pExpr, pExpr->tokenId, &index);
- return checkColumnFilterInfo(pCmd, pQueryInfo, &index, pExpr, relOptr);
+ SColumnIndex idx = COLUMN_INDEX_INITIALIZER;
+ addAllColumn(pCmd, pQueryInfo, pExpr, pExpr->tokenId, &idx);
+ return checkColumnFilterInfo(pCmd, pQueryInfo, &idx, pExpr, relOptr);
}
return TSDB_CODE_SUCCESS;
}
@@ -4965,17 +4971,17 @@ static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS
pRight = pRight->pLeft;
}
- SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(&pLeft->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ SColumnIndex idx = COLUMN_INDEX_INITIALIZER;
+ if (getColumnIndexByName(&pLeft->columnName, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
- STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
- SSchema* pTagSchema1 = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex);
+ STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex);
+ SSchema* pTagSchema1 = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, idx.columnIndex);
- assert(index.tableIndex >= 0 && index.tableIndex < TSDB_MAX_JOIN_TABLE_NUM);
+ assert(idx.tableIndex >= 0 && idx.tableIndex < TSDB_MAX_JOIN_TABLE_NUM);
- SJoinNode **leftNode = &pQueryInfo->tagCond.joinInfo.joinTables[index.tableIndex];
+ SJoinNode **leftNode = &pQueryInfo->tagCond.joinInfo.joinTables[idx.tableIndex];
if (*leftNode == NULL) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
@@ -4989,9 +4995,9 @@ static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
- index.columnIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
+ idx.columnIndex = idx.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
if (tscColumnExists(pTableMetaInfo->tagColList, pTagSchema1->colId, pTableMetaInfo->pTableMeta->id.uid) < 0) {
- tscColumnListInsert(pTableMetaInfo->tagColList, index.columnIndex, pTableMeta->id.uid, pTagSchema1);
+ tscColumnListInsert(pTableMetaInfo->tagColList, idx.columnIndex, pTableMeta->id.uid, pTagSchema1);
atomic_add_fetch_32(&pTableMetaInfo->joinTagNum, 1);
if (pTableMetaInfo->joinTagNum > 1) {
@@ -5000,19 +5006,19 @@ static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS
}
}
- int16_t leftIdx = index.tableIndex;
+ int16_t leftIdx = idx.tableIndex;
- index = (SColumnIndex)COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(&pRight->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ idx = (SColumnIndex)COLUMN_INDEX_INITIALIZER;
+ if (getColumnIndexByName(&pRight->columnName, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
- pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
- SSchema* pTagSchema2 = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex);
+ pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex);
+ SSchema* pTagSchema2 = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, idx.columnIndex);
- assert(index.tableIndex >= 0 && index.tableIndex < TSDB_MAX_JOIN_TABLE_NUM);
+ assert(idx.tableIndex >= 0 && idx.tableIndex < TSDB_MAX_JOIN_TABLE_NUM);
- SJoinNode **rightNode = &pQueryInfo->tagCond.joinInfo.joinTables[index.tableIndex];
+ SJoinNode **rightNode = &pQueryInfo->tagCond.joinInfo.joinTables[idx.tableIndex];
if (*rightNode == NULL) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
@@ -5025,10 +5031,10 @@ static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
- index.columnIndex = index.columnIndex - tscGetNumOfColumns(pTableMeta);
+ idx.columnIndex = idx.columnIndex - tscGetNumOfColumns(pTableMeta);
if (tscColumnExists(pTableMetaInfo->tagColList, pTagSchema2->colId, pTableMeta->id.uid) < 0) {
- tscColumnListInsert(pTableMetaInfo->tagColList, index.columnIndex, pTableMeta->id.uid, pTagSchema2);
+ tscColumnListInsert(pTableMetaInfo->tagColList, idx.columnIndex, pTableMeta->id.uid, pTagSchema2);
atomic_add_fetch_32(&pTableMetaInfo->joinTagNum, 1);
if (pTableMetaInfo->joinTagNum > 1) {
@@ -5037,7 +5043,7 @@ static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS
}
}
- int16_t rightIdx = index.tableIndex;
+ int16_t rightIdx = idx.tableIndex;
if (pTagSchema1->type != pTagSchema2->type) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
@@ -5290,14 +5296,14 @@ static int32_t validateSQLExprItem(SSqlCmd* pCmd, tSqlExpr* pExpr,
return ret;
}
} else if (pExpr->type == SQL_NODE_TABLE_COLUMN) {
- SColumnIndex index = COLUMN_INDEX_INITIALIZER;
+ SColumnIndex idx = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(&pExpr->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) !=
+ if (getColumnIndexByName(&pExpr->columnName, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) !=
TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
- pList->ids[pList->num++] = index;
+ pList->ids[pList->num++] = idx;
*type = SQLEXPR_TYPE_SCALAR;
} else if (pExpr->type == SQL_NODE_DATA_TYPE) {
if (pExpr->dataType.type < 0 || pExpr->dataType.bytes <= 0) {
@@ -5493,17 +5499,17 @@ static int32_t setNormalExprToCond(tSqlExpr** parent, tSqlExpr* pExpr, int32_t p
}
-static int32_t validateNullExpr(tSqlExpr* pExpr, STableMeta* pTableMeta, int32_t index, char* msgBuf) {
+static int32_t validateNullExpr(tSqlExpr* pExpr, STableMeta* pTableMeta, int32_t idx, char* msgBuf) {
const char* msg = "only support is [not] null";
tSqlExpr* pRight = pExpr->pRight;
SSchema* pSchema = tscGetTableSchema(pTableMeta);
- if (pRight->tokenId == TK_NULL && pSchema[index].type != TSDB_DATA_TYPE_JSON && (!(pExpr->tokenId == TK_ISNULL || pExpr->tokenId == TK_NOTNULL))) {
+ if (pRight->tokenId == TK_NULL && pSchema[idx].type != TSDB_DATA_TYPE_JSON && (!(pExpr->tokenId == TK_ISNULL || pExpr->tokenId == TK_NOTNULL))) {
return invalidOperationMsg(msgBuf, msg);
}
if (pRight->tokenId == TK_STRING) {
- if (IS_VAR_DATA_TYPE(pSchema[index].type) || pSchema[index].type == TSDB_DATA_TYPE_JSON) {
+ if (IS_VAR_DATA_TYPE(pSchema[idx].type) || pSchema[idx].type == TSDB_DATA_TYPE_JSON) {
return TSDB_CODE_SUCCESS;
}
@@ -5526,7 +5532,7 @@ static int32_t validateNullExpr(tSqlExpr* pExpr, STableMeta* pTableMeta, int32_t
}
// check for like expression
-static int32_t validateLikeExpr(tSqlExpr* pExpr, STableMeta* pTableMeta, int32_t index, char* msgBuf) {
+static int32_t validateLikeExpr(tSqlExpr* pExpr, STableMeta* pTableMeta, int32_t idx, char* msgBuf) {
const char* msg1 = "wildcard string should be less than %d characters";
const char* msg2 = "illegal column type for like";
@@ -5541,7 +5547,7 @@ static int32_t validateLikeExpr(tSqlExpr* pExpr, STableMeta* pTableMeta, int32_t
}
SSchema* pSchema = tscGetTableSchema(pTableMeta);
- if ((pLeft->tokenId != TK_ARROW) && (!isTablenameToken(&pLeft->columnName)) && !IS_VAR_DATA_TYPE(pSchema[index].type)) {
+ if ((pLeft->tokenId != TK_ARROW) && (!isTablenameToken(&pLeft->columnName)) && !IS_VAR_DATA_TYPE(pSchema[idx].type)) {
return invalidOperationMsg(msgBuf, msg2);
}
}
@@ -5601,7 +5607,7 @@ static int32_t validateJsonTagExpr(tSqlExpr* pExpr, char* msgBuf) {
}
// check for match expression
-static int32_t validateMatchExpr(tSqlExpr* pExpr, STableMeta* pTableMeta, int32_t index, char* msgBuf) {
+static int32_t validateMatchExpr(tSqlExpr* pExpr, STableMeta* pTableMeta, int32_t idx, char* msgBuf) {
const char* msg1 = "regular expression string should be less than %d characters";
const char* msg3 = "invalid regular expression";
@@ -5676,7 +5682,7 @@ void convertWhereStringCharset(tSqlExpr* pRight){
free(newData);
}
-static int32_t handleColumnInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, SColumnIndex* index) {
+static int32_t handleColumnInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, SColumnIndex* idx) {
const char* msg2 = "illegal column name";
int32_t ret = TSDB_CODE_SUCCESS;
if (pExpr == NULL) {
@@ -5685,11 +5691,11 @@ static int32_t handleColumnInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS
if (isComparisonOperator(pExpr)) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
- ret = handleColumnInQueryCond(pCmd, pQueryInfo, pExpr->pLeft, index);
+ ret = handleColumnInQueryCond(pCmd, pQueryInfo, pExpr->pLeft, idx);
if( ret != TSDB_CODE_SUCCESS) {
return ret;
}
- ret = handleColumnInQueryCond(pCmd, pQueryInfo, pExpr->pRight, index);
+ ret = handleColumnInQueryCond(pCmd, pQueryInfo, pExpr->pRight, idx);
return ret;
}
@@ -5701,7 +5707,7 @@ static int32_t handleColumnInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS
}
if (colName) {
- if (getColumnIndexByName(colName, pQueryInfo, index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(colName, pQueryInfo, idx, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
ret = invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
}
@@ -5729,51 +5735,51 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql
int32_t ret = TSDB_CODE_SUCCESS;
- SColumnIndex index = COLUMN_INDEX_INITIALIZER;
+ SColumnIndex idx = COLUMN_INDEX_INITIALIZER;
if (!tSqlExprIsParentOfLeaf(*pExpr)) {
- ret = handleColumnInQueryCond(pCmd, pQueryInfo, pLeft, &index);
+ ret = handleColumnInQueryCond(pCmd, pQueryInfo, pLeft, &idx);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
- ret = handleColumnInQueryCond(pCmd, pQueryInfo, pRight, &index);
+ ret = handleColumnInQueryCond(pCmd, pQueryInfo, pRight, &idx);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
} else {
- if (getColumnIndexByName(colName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(colName, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
}
- *tbIdx = index.tableIndex;
+ *tbIdx = idx.tableIndex;
- STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
+ STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex);
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
- SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, index.columnIndex);
+ SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, idx.columnIndex);
// delete where condition check , column must ts or tag
if (delData) {
if (!((pSchema->colId == PRIMARYKEY_TIMESTAMP_COL_INDEX && pSchema->type == TSDB_DATA_TYPE_TIMESTAMP) ||
- index.columnIndex >= tscGetNumOfColumns(pTableMeta) ||
- index.columnIndex == TSDB_TBNAME_COLUMN_INDEX)) {
+ idx.columnIndex >= tscGetNumOfColumns(pTableMeta) ||
+ idx.columnIndex == TSDB_TBNAME_COLUMN_INDEX)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7);
}
}
// validate the null expression
- int32_t code = validateNullExpr(*pExpr, pTableMeta, index.columnIndex, tscGetErrorMsgPayload(pCmd));
+ int32_t code = validateNullExpr(*pExpr, pTableMeta, idx.columnIndex, tscGetErrorMsgPayload(pCmd));
if (code != TSDB_CODE_SUCCESS) {
return code;
}
// validate the like expression
- code = validateLikeExpr(*pExpr, pTableMeta, index.columnIndex, tscGetErrorMsgPayload(pCmd));
+ code = validateLikeExpr(*pExpr, pTableMeta, idx.columnIndex, tscGetErrorMsgPayload(pCmd));
if (code != TSDB_CODE_SUCCESS) {
return code;
}
// validate the match expression
- code = validateMatchExpr(*pExpr, pTableMeta, index.columnIndex, tscGetErrorMsgPayload(pCmd));
+ code = validateMatchExpr(*pExpr, pTableMeta, idx.columnIndex, tscGetErrorMsgPayload(pCmd));
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@@ -5782,8 +5788,8 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql
convertWhereStringCharset(pRight);
}
- if (pSchema->type == TSDB_DATA_TYPE_TIMESTAMP && index.columnIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX) { // query on time range
- if (!tSqlExprIsParentOfLeaf(*pExpr) || !validateJoinExprNode(pCmd, pQueryInfo, *pExpr, &index)) {
+ if (pSchema->type == TSDB_DATA_TYPE_TIMESTAMP && idx.columnIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX) { // query on time range
+ if (!tSqlExprIsParentOfLeaf(*pExpr) || !validateJoinExprNode(pCmd, pQueryInfo, *pExpr, &idx)) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
@@ -5792,8 +5798,8 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql
TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_QUERY);
pCondExpr->tsJoin = true;
- assert(index.tableIndex >= 0 && index.tableIndex < TSDB_MAX_JOIN_TABLE_NUM);
- SJoinNode **leftNode = &pQueryInfo->tagCond.joinInfo.joinTables[index.tableIndex];
+ assert(idx.tableIndex >= 0 && idx.tableIndex < TSDB_MAX_JOIN_TABLE_NUM);
+ SJoinNode **leftNode = &pQueryInfo->tagCond.joinInfo.joinTables[idx.tableIndex];
if (*leftNode == NULL) {
*leftNode = calloc(1, sizeof(SJoinNode));
if (*leftNode == NULL) {
@@ -5801,17 +5807,17 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql
}
}
- int16_t leftIdx = index.tableIndex;
+ int16_t leftIdx = idx.tableIndex;
- if (getColumnIndexByName(&pRight->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(&pRight->columnName, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
- if (index.tableIndex < 0 || index.tableIndex >= TSDB_MAX_JOIN_TABLE_NUM) {
+ if (idx.tableIndex < 0 || idx.tableIndex >= TSDB_MAX_JOIN_TABLE_NUM) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
- SJoinNode **rightNode = &pQueryInfo->tagCond.joinInfo.joinTables[index.tableIndex];
+ SJoinNode **rightNode = &pQueryInfo->tagCond.joinInfo.joinTables[idx.tableIndex];
if (*rightNode == NULL) {
*rightNode = calloc(1, sizeof(SJoinNode));
if (*rightNode == NULL) {
@@ -5819,7 +5825,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql
}
}
- int16_t rightIdx = index.tableIndex;
+ int16_t rightIdx = idx.tableIndex;
if ((*leftNode)->tsJoin == NULL) {
(*leftNode)->tsJoin = taosArrayInit(2, sizeof(int16_t));
@@ -5855,7 +5861,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql
}
*pExpr = NULL; // remove this expression
- } else if (index.columnIndex >= tscGetNumOfColumns(pTableMeta) || index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
+ } else if (idx.columnIndex >= tscGetNumOfColumns(pTableMeta) || idx.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
// query on tags, check for tag query condition
if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
@@ -5870,7 +5876,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql
}
if (joinQuery && pRight != NULL && (pRight->tokenId == TK_ID || pRight->tokenId == TK_ARROW)) { // join on tag columns for stable query
- if (!validateJoinExprNode(pCmd, pQueryInfo, *pExpr, &index)) {
+ if (!validateJoinExprNode(pCmd, pQueryInfo, *pExpr, &idx)) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
@@ -6047,15 +6053,15 @@ static void doExtractExprForSTable(SSqlCmd* pCmd, tSqlExpr** pExpr, SQueryInfo*
tSqlExpr* pLeft = (*pExpr)->pLeft;
if (pLeft->tokenId == TK_ARROW || pLeft->tokenId == TK_ID) {
- SColumnIndex index = COLUMN_INDEX_INITIALIZER;
+ SColumnIndex idx = COLUMN_INDEX_INITIALIZER;
if(pLeft->tokenId == TK_ARROW) {
pLeft = pLeft->pLeft;
}
- if (getColumnIndexByName(&pLeft->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(&pLeft->columnName, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return;
}
- if (index.tableIndex != tableIndex) {
+ if (idx.tableIndex != tableIndex) {
return;
}
}
@@ -6181,12 +6187,12 @@ static int32_t convertTimeRangeFromExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t
return code;
}
} else {
- SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(&pExpr->pLeft->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ SColumnIndex idx = COLUMN_INDEX_INITIALIZER;
+ if (getColumnIndexByName(&pExpr->pLeft->columnName, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
- STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
+ STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex);
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
tSqlExpr* pRight = pExpr->pRight;
@@ -6254,27 +6260,27 @@ static void cleanQueryExpr(SCondExpr* pCondExpr) {
static void doAddJoinTagsColumnsIntoTagList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondExpr* pCondExpr) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
if (QUERY_IS_JOIN_QUERY(pQueryInfo->type) && UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
- SColumnIndex index = COLUMN_INDEX_INITIALIZER;
+ SColumnIndex idx = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(pCmd, &pCondExpr->pJoinExpr->pLeft->ColName, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(pCmd, &pCondExpr->pJoinExpr->pLeft->ColName, pQueryInfo, &idx) != TSDB_CODE_SUCCESS) {
tscError("%p: invalid column name (left)", pQueryInfo);
}
- pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
- index.columnIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
+ pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex);
+ idx.columnIndex = idx.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
SSchema* pSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta);
- tscColumnListInsert(pTableMetaInfo->tagColList, &index, &pSchema[index.columnIndex]);
+ tscColumnListInsert(pTableMetaInfo->tagColList, &idx, &pSchema[idx.columnIndex]);
- if (getColumnIndexByName(pCmd, &pCondExpr->pJoinExpr->pRight->ColName, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(pCmd, &pCondExpr->pJoinExpr->pRight->ColName, pQueryInfo, &idx) != TSDB_CODE_SUCCESS) {
tscError("%p: invalid column name (right)", pQueryInfo);
}
- pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
- index.columnIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
+ pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex);
+ idx.columnIndex = idx.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
pSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta);
- tscColumnListInsert(pTableMetaInfo->tagColList, &index, &pSchema[index.columnIndex]);
+ tscColumnListInsert(pTableMetaInfo->tagColList, &idx, &pSchema[idx.columnIndex]);
}
}
*/
@@ -6415,10 +6421,10 @@ static int32_t getTagQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondE
size_t num = taosArrayGetSize(colList);
for(int32_t j = 0; j < num; ++j) {
SColIndex* pIndex = taosArrayGet(colList, j);
- SColumnIndex index = {.tableIndex = i, .columnIndex = pIndex->colIndex - numOfCols};
+ SColumnIndex idx = {.tableIndex = i, .columnIndex = pIndex->colIndex - numOfCols};
SSchema* s = tscGetTableSchema(pTableMetaInfo->pTableMeta);
- tscColumnListInsert(pTableMetaInfo->tagColList, index.columnIndex, pTableMetaInfo->pTableMeta->id.uid,
+ tscColumnListInsert(pTableMetaInfo->tagColList, idx.columnIndex, pTableMetaInfo->pTableMeta->id.uid,
&s[pIndex->colIndex]);
}
@@ -7057,7 +7063,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
columnName.z = pVar->pz;
}
- SColumnIndex index = COLUMN_INDEX_INITIALIZER;
+ SColumnIndex idx = COLUMN_INDEX_INITIALIZER;
bool udf = false;
if (pQueryInfo->pUdfInfo && taosArrayGetSize(pQueryInfo->pUdfInfo) > 0) {
@@ -7073,7 +7079,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
}
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { // super table query
- if (getColumnIndexByName(&columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(&columnName, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(pMsgBuf, msg1);
}
@@ -7081,8 +7087,8 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
bool orderByTS = false;
bool orderByGroupbyCol = false;
- if (index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) { // order by tag1
- int32_t relTagIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
+ if (idx.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) { // order by tag1
+ int32_t relTagIndex = idx.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
// it is a tag column
if (pQueryInfo->groupbyExpr.columnInfo == NULL) {
@@ -7106,7 +7112,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
orderByTags = true;
}
}
- } else if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { // order by tbname
+ } else if (idx.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { // order by tbname
// it is a tag column
if (pQueryInfo->groupbyExpr.columnInfo == NULL) {
return invalidOperationMsg(pMsgBuf, msg4);
@@ -7115,13 +7121,13 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
if (TSDB_TBNAME_COLUMN_INDEX == pColIndex->colIndex) {
orderByTags = true;
}
- }else if (index.columnIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX) { // order by ts
+ }else if (idx.columnIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX) { // order by ts
orderByTS = true;
}else{ // order by normal column
SArray *columnInfo = pQueryInfo->groupbyExpr.columnInfo;
if (columnInfo != NULL && taosArrayGetSize(columnInfo) > 0) {
SColIndex* pColIndex = taosArrayGet(columnInfo, 0);
- if (pColIndex->colIndex == index.columnIndex) {
+ if (pColIndex->colIndex == idx.columnIndex) {
orderByGroupbyCol = true;
}
}
@@ -7135,7 +7141,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
if (tscIsDiffDerivLikeQuery(pQueryInfo)) {
return invalidOperationMsg(pMsgBuf, msg12);
}
- //pQueryInfo->groupbyExpr.orderIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
+ //pQueryInfo->groupbyExpr.orderIndex = idx.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
pQueryInfo->groupbyExpr.orderType = pItem->sortOrder;
} else if (orderByGroupbyCol) {
@@ -7154,12 +7160,12 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
pExpr = tscExprGet(pQueryInfo, pos);
- if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
+ if (pExpr->base.colInfo.colIndex != idx.columnIndex && idx.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
return invalidOperationMsg(pMsgBuf, msg5);
}
pQueryInfo->order.order = pItem->sortOrder;
- pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId;
+ pQueryInfo->order.orderColId = pSchema[idx.columnIndex].colId;
} else {
if (udf) {
return invalidOperationMsg(pMsgBuf, msg11);
@@ -7185,27 +7191,27 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
cname.type = pVar->nType;
cname.z = pVar->pz;
}
- if (getColumnIndexByName(&cname, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(&cname, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(pMsgBuf, msg1);
}
- if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
+ if (idx.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
return invalidOperationMsg(pMsgBuf, msg6);
}
pQueryInfo->order.order = pItem->sortOrder;
pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX;
}
} else if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo) || UTIL_TABLE_IS_CHILD_TABLE(pTableMetaInfo)) { // check order by clause for normal table & temp table
- if (getColumnIndexByName(&columnName, pQueryInfo, &index, pMsgBuf) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(&columnName, pQueryInfo, &idx, pMsgBuf) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(pMsgBuf, msg1);
}
- if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX && !isTopBottomUniqueQuery(pQueryInfo)){
+ if (idx.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX && !isTopBottomUniqueQuery(pQueryInfo)){
bool validOrder = false;
SArray *columnInfo = pQueryInfo->groupbyExpr.columnInfo;
if (columnInfo != NULL && taosArrayGetSize(columnInfo) > 0) {
SColIndex* pColIndex = taosArrayGet(columnInfo, 0);
- validOrder = (pColIndex->colIndex == index.columnIndex);
+ validOrder = (pColIndex->colIndex == idx.columnIndex);
}
if (!validOrder) {
@@ -7217,7 +7223,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
if (columnInfo != NULL && taosArrayGetSize(columnInfo) > 0) {
/*SColIndex* pColIndex = taosArrayGet(columnInfo, 0);
- if (pColIndex->colIndex != index.columnIndex) {
+ if (pColIndex->colIndex != idx.columnIndex) {
return invalidOperationMsg(pMsgBuf, msg8);
}*/
} else {
@@ -7228,12 +7234,12 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
pExpr = tscExprGet(pQueryInfo, pos);
- if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
+ if (pExpr->base.colInfo.colIndex != idx.columnIndex && idx.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
return invalidOperationMsg(pMsgBuf, msg5);
}
}
pQueryInfo->order.order = pItem->sortOrder;
- pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId;
+ pQueryInfo->order.orderColId = pSchema[idx.columnIndex].colId;
}else{
pQueryInfo->order.order = pItem->sortOrder;
pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX;
@@ -7247,11 +7253,11 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
// inner subquery.
assert(UTIL_TABLE_IS_TMP_TABLE(pTableMetaInfo) && taosArrayGetSize(pSqlNode->pSortOrder) == 1);
- if (getColumnIndexByName(&columnName, pQueryInfo, &index, pMsgBuf) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(&columnName, pQueryInfo, &idx, pMsgBuf) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(pMsgBuf, msg1);
}
- if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
+ if (idx.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
return invalidOperationMsg(pMsgBuf, msg1);
}
@@ -7263,7 +7269,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
bool found = false;
for (int32_t i = 0; i < tscNumOfExprs(pQueryInfo); ++i) {
SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
- if (pExpr->base.functionId == TSDB_FUNC_PRJ && pExpr->base.colInfo.colId == pSchema[index.columnIndex].colId) {
+ if (pExpr->base.functionId == TSDB_FUNC_PRJ && pExpr->base.colInfo.colId == pSchema[idx.columnIndex].colId) {
found = true;
break;
}
@@ -7271,7 +7277,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
if (!found) {
int32_t numOfCols = (int32_t)tscNumOfFields(pQueryInfo);
- tscAddFuncInSelectClause(pQueryInfo, numOfCols, TSDB_FUNC_PRJ, &index, pSchema, TSDB_COL_NORMAL, getNewResColId(pCmd));
+ tscAddFuncInSelectClause(pQueryInfo, numOfCols, TSDB_FUNC_PRJ, &idx, pSchema, TSDB_COL_NORMAL, getNewResColId(pCmd));
SInternalField* pSupInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, numOfCols);
pSupInfo->visible = false;
@@ -7281,7 +7287,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
}
pQueryInfo->order.order = pItem->sortOrder;
- pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId;
+ pQueryInfo->order.orderColId = pSchema[idx.columnIndex].colId;
}
return TSDB_CODE_SUCCESS;
@@ -7394,17 +7400,20 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
return invalidOperationMsg(pMsg, msg9);
}
- SColumnIndex index = COLUMN_INDEX_INITIALIZER;
+ SColumnIndex idx = COLUMN_INDEX_INITIALIZER;
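+  // the referenced column name must be a binary-string variant; otherwise pVar.pz/nLen below would be invalid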
+ if (pItem->pVar.nType != TSDB_DATA_TYPE_BINARY) {
+ return invalidOperationMsg(pMsg, msg17);
+ }
SStrToken name = {.z = pItem->pVar.pz, .n = pItem->pVar.nLen};
- if (getColumnIndexByName(&name, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(&name, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
int32_t numOfCols = tscGetNumOfColumns(pTableMeta);
- if (index.columnIndex < numOfCols) {
+ if (idx.columnIndex < numOfCols) {
return invalidOperationMsg(pMsg, msg10);
- } else if (index.columnIndex == numOfCols) {
+ } else if (idx.columnIndex == numOfCols) {
return invalidOperationMsg(pMsg, msg11);
}
@@ -7468,6 +7477,9 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
int16_t numOfTags = tscGetNumOfTags(pTableMeta);
SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER;
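+  // same guard as above: only a binary-string variant carries a usable column name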
+ if (item->pVar.nType != TSDB_DATA_TYPE_BINARY) {
+ return invalidOperationMsg(pMsg, msg17);
+ }
SStrToken name = {.z = item->pVar.pz, .n = item->pVar.nLen};
if (getColumnIndexByName(&name, pQueryInfo, &columnIndex, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
@@ -7613,6 +7625,9 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
tVariantListItem* pItem = taosArrayGet(pAlterSQL->varList, 0);
SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER;
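+  // reject non-string variants before building the column name token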
+ if (pItem->pVar.nType != TSDB_DATA_TYPE_BINARY) {
+ return invalidOperationMsg(pMsg, msg17);
+ }
SStrToken name = {.z = pItem->pVar.pz, .n = pItem->pVar.nLen};
if (getColumnIndexByName(&name, pQueryInfo, &columnIndex, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
@@ -7768,10 +7783,20 @@ int32_t validateSqlFunctionInStreamSql(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
bool isProjectionFunction = false;
+ bool minMaxRowExists = false;
const char* msg1 = "functions not compatible with interval";
// multi-output set/ todo refactor
size_t size = taosArrayGetSize(pQueryInfo->exprList);
+
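+  // first pass: remember whether min_row/max_row appears; the selectivity check below also covers that case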
+ for (int32_t k = 0; k < size; ++k) {
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, k);
+
+ if (pExpr->base.functionId == TSDB_FUNC_MIN_ROW || pExpr->base.functionId == TSDB_FUNC_MAX_ROW) {
+ minMaxRowExists = true;
+ break;
+ }
+ }
for (int32_t k = 0; k < size; ++k) {
SExprInfo* pExpr = tscExprGet(pQueryInfo, k);
@@ -7792,7 +7817,7 @@ int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd, SQueryInfo* pQu
}
// projection query on primary timestamp, the selectivity function needs to be present.
- if (pExpr->base.functionId == TSDB_FUNC_PRJ && pExpr->base.colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
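+  // with min_row/max_row present, the selectivity requirement is enforced for every expression, not only the ts projection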
+ if (minMaxRowExists || (pExpr->base.functionId == TSDB_FUNC_PRJ && pExpr->base.colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX)) {
bool hasSelectivity = false;
for (int32_t j = 0; j < size; ++j) {
SExprInfo* pEx = tscExprGet(pQueryInfo, j);
@@ -8244,20 +8269,20 @@ void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t subClau
SSchema* pTagSchema = tscGetColumnSchemaById(pTableMetaInfo->pTableMeta, colId);
int16_t colIndex = tscGetTagColIndexById(pTableMetaInfo->pTableMeta, colId);
- SColumnIndex index = {.tableIndex = 0, .columnIndex = colIndex};
+ SColumnIndex idx = {.tableIndex = 0, .columnIndex = colIndex};
char* name = pTagSchema->name;
int16_t type = pTagSchema->type;
int16_t bytes = pTagSchema->bytes;
- pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TAG, &index, type, bytes, getNewResColId(&pSql->cmd), bytes, true);
+ pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TAG, &idx, type, bytes, getNewResColId(&pSql->cmd), bytes, true);
pExpr->base.colInfo.flag = TSDB_COL_TAG;
// NOTE: tag column does not add to source column list
SColumnList ids = {0};
insertResultField(pQueryInfo, (int32_t)size, &ids, bytes, (int8_t)type, name, pExpr);
- int32_t relIndex = index.columnIndex;
+ int32_t relIndex = idx.columnIndex;
pExpr->base.colInfo.colIndex = relIndex;
SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, 0);
@@ -8296,13 +8321,14 @@ void doAddGroupColumnForSubquery(SQueryInfo* pQueryInfo, int32_t tagIndex, SSqlC
pInfo->visible = false;
}
-static void doUpdateSqlFunctionForTagPrj(SQueryInfo* pQueryInfo) {
+static void doUpdateSqlFunctionForColTagPrj(SQueryInfo* pQueryInfo) {
int32_t tagLength = 0;
size_t size = taosArrayGetSize(pQueryInfo->exprList);
-//todo is 0??
+  //todo: is the hard-coded table index 0 always correct here?
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
bool isSTable = UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo);
+ bool minMaxRowExists = false;
for (int32_t i = 0; i < size; ++i) {
SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
@@ -8312,6 +8338,20 @@ static void doUpdateSqlFunctionForTagPrj(SQueryInfo* pQueryInfo) {
} else if (pExpr->base.functionId == TSDB_FUNC_PRJ && pExpr->base.colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
pExpr->base.functionId = TSDB_FUNC_TS_DUMMY; // ts_select ts,top(col,2)
tagLength += pExpr->base.resBytes;
+ } else if (pExpr->base.functionId == TSDB_FUNC_MIN_ROW || pExpr->base.functionId == TSDB_FUNC_MAX_ROW) {
+ minMaxRowExists = true;
+ }
+ }
+
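+  // when min_row/max_row is present, remaining plain projections are rewritten to TSDB_FUNC_COL_DUMMY and their widths counted into tagLength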
+ if (minMaxRowExists) {
+ for (int32_t i = 0; i < size; ++i) {
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
+ if (pExpr->base.functionId == TSDB_FUNC_MIN_ROW || pExpr->base.functionId == TSDB_FUNC_MAX_ROW) {
+ continue;
+ } else if (pExpr->base.functionId == TSDB_FUNC_PRJ) {
+ pExpr->base.functionId = TSDB_FUNC_COL_DUMMY;
+ tagLength += pExpr->base.resBytes;
+ }
}
}
@@ -8323,8 +8363,9 @@ static void doUpdateSqlFunctionForTagPrj(SQueryInfo* pQueryInfo) {
continue;
}
- if ((pExpr->base.functionId != TSDB_FUNC_TAG_DUMMY && pExpr->base.functionId != TSDB_FUNC_TS_DUMMY) &&
- !(pExpr->base.functionId == TSDB_FUNC_PRJ && TSDB_COL_IS_UD_COL(pExpr->base.colInfo.flag))) {
+ if ((pExpr->base.functionId != TSDB_FUNC_TAG_DUMMY && pExpr->base.functionId != TSDB_FUNC_TS_DUMMY &&
+ pExpr->base.functionId != TSDB_FUNC_COL_DUMMY)
+ && !(pExpr->base.functionId == TSDB_FUNC_PRJ && TSDB_COL_IS_UD_COL(pExpr->base.colInfo.flag))) {
SSchema* pColSchema = &pSchema[pExpr->base.colInfo.colIndex];
getResultDataInfo(pColSchema->type, pColSchema->bytes, pExpr->base.functionId, (int32_t)pExpr->base.param[0].i64, &pExpr->base.resType,
&pExpr->base.resBytes, &pExpr->base.interBytes, tagLength, isSTable, NULL);
@@ -8453,10 +8494,14 @@ static bool check_expr_in_groupby_colum(SGroupbyExpr* pGroupbyExpr, SExprInfo* p
* 2. if selectivity function and tagprj function both exist, there should be only
* one selectivity function exists.
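+ * 3. min_row and max_row must not be used together, and only one selectivity function is allowed alongside them.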
*/
-static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, char* msg) {
+static int32_t checkUpdateColTagPrjFunctions(SQueryInfo* pQueryInfo, char* msg) {
const char* msg1 = "only one selectivity function allowed in presence of tags function";
const char* msg2 = "aggregation function should not be mixed up with projection";
+ const char* msg3 = "min_row should not be mixed up with max_row";
+ const char* msg4 = "only one selectivity function allowed in presence of min_row or max_row function";
+ bool minRowExists = false;
+ bool maxRowExists = false;
bool tagTsColExists = false;
int16_t numOfScalar = 0;
int16_t numOfSelectivity = 0;
@@ -8473,9 +8518,17 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, char* msg) {
tagTsColExists = true; // selectivity + ts/tag column
break;
}
+ } else if (pExpr->base.functionId == TSDB_FUNC_MIN_ROW) {
+ minRowExists = true;
+ } else if (pExpr->base.functionId == TSDB_FUNC_MAX_ROW) {
+ maxRowExists = true;
}
}
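+  // min_row and max_row are mutually exclusive within one select list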
+ if (minRowExists && maxRowExists) {
+ return invalidOperationMsg(msg, msg3);
+ }
+
for (int32_t i = 0; i < numOfExprs; ++i) {
SExprInfo* pExpr = taosArrayGetP(pQueryInfo->exprList, i);
@@ -8514,7 +8567,7 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, char* msg) {
}
}
- if (tagTsColExists) { // check if the selectivity function exists
+ if (tagTsColExists || minRowExists || maxRowExists) { // check if the selectivity function exists
// When the tag projection function on tag column that is not in the group by clause, aggregation function and
// selectivity function exist in select clause is not allowed.
if (numOfAggregation > 0) {
@@ -8525,7 +8578,7 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, char* msg) {
* if numOfSelectivity equals to 0, it is a super table projection query
*/
if (numOfSelectivity == 1) {
- doUpdateSqlFunctionForTagPrj(pQueryInfo);
+ doUpdateSqlFunctionForColTagPrj(pQueryInfo);
int32_t code = doUpdateSqlFunctionForColPrj(pQueryInfo);
if (code != TSDB_CODE_SUCCESS) {
return code;
@@ -8551,11 +8604,17 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, char* msg) {
(functionId == TSDB_FUNC_LAST_DST && (pExpr->base.colInfo.flag & TSDB_COL_NULL) != 0)) {
// do nothing
} else {
- return invalidOperationMsg(msg, msg1);
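+        // report the message that matches whichever feature imposed the single-selectivity restriction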
+ if (tagTsColExists) {
+ return invalidOperationMsg(msg, msg1);
+ }
+
+ if (minRowExists || maxRowExists) {
+ return invalidOperationMsg(msg, msg4);
+ }
}
}
- doUpdateSqlFunctionForTagPrj(pQueryInfo);
+ doUpdateSqlFunctionForColTagPrj(pQueryInfo);
int32_t code = doUpdateSqlFunctionForColPrj(pQueryInfo);
if (code != TSDB_CODE_SUCCESS) {
return code;
@@ -8620,8 +8679,8 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo
int32_t pos = tscGetFirstInvisibleFieldPos(pQueryInfo);
- SColumnIndex index = {.tableIndex = pQueryInfo->groupbyExpr.tableIndex, .columnIndex = colIndex};
- SExprInfo* pExpr = tscExprInsert(pQueryInfo, pos, f, &index, s->type, s->bytes, getNewResColId(pCmd), s->bytes, true);
+ SColumnIndex idx = {.tableIndex = pQueryInfo->groupbyExpr.tableIndex, .columnIndex = colIndex};
+ SExprInfo* pExpr = tscExprInsert(pQueryInfo, pos, f, &idx, s->type, s->bytes, getNewResColId(pCmd), s->bytes, true);
// NOTE: tag column does not add to source column list
SColumnList ids = createColumnList(1, 0, pColIndex->colIndex);
insertResultField(pQueryInfo, pos, &ids, s->bytes, (int8_t)s->type, pColIndex->name, pExpr);
@@ -8779,7 +8838,7 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, char*
}
}
- if (checkUpdateTagPrjFunctions(pQueryInfo, msg) != TSDB_CODE_SUCCESS) {
+ if (checkUpdateColTagPrjFunctions(pQueryInfo, msg) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
@@ -8794,7 +8853,7 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, char*
return TSDB_CODE_SUCCESS;
} else {
- return checkUpdateTagPrjFunctions(pQueryInfo, msg);
+ return checkUpdateColTagPrjFunctions(pQueryInfo, msg);
}
}
@@ -8914,20 +8973,20 @@ int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
{"client_version()", 16},
{"current_user()", 14}};
- int32_t index = -1;
+ int32_t idx = -1;
if (server_status == true) {
- index = 2;
+ idx = 2;
} else {
for (int32_t i = 0; i < tListLen(functionsInfo); ++i) {
if (strncasecmp(functionsInfo[i].name, pExpr->exprToken.z, functionsInfo[i].len) == 0 &&
functionsInfo[i].len == pExpr->exprToken.n) {
- index = i;
+ idx = i;
break;
}
}
}
- switch (index) {
+ switch (idx) {
case 0:
pQueryInfo->command = TSDB_SQL_CURRENT_DB;break;
case 1:
@@ -8946,7 +9005,7 @@ int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
tDataTypes[TSDB_DATA_TYPE_INT].bytes, getNewResColId(pCmd), tDataTypes[TSDB_DATA_TYPE_INT].bytes, false);
tSqlExprItem* item = taosArrayGet(pExprList, 0);
- const char* name = (item->aliasName != NULL)? item->aliasName:functionsInfo[index].name;
+ const char* name = (item->aliasName != NULL)? item->aliasName:functionsInfo[idx].name;
tstrncpy(pExpr1->base.aliasName, name, tListLen(pExpr1->base.aliasName));
return TSDB_CODE_SUCCESS;
@@ -9616,10 +9675,12 @@ int32_t tscGetExprFilters(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pSelect
pParam = taosArrayGet(pSqlExpr->Expr.paramList, 0);
SStrToken* pToken = &pParam->pNode->columnName;
- SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- getColumnIndexByName(pToken, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd));
- STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
- schema = *tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex);
+ SColumnIndex idx = COLUMN_INDEX_INITIALIZER;
+ if (getColumnIndexByName(pToken, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
+ STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex);
+ schema = *tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, idx.columnIndex);
} else {
schema = (SSchema) {.colId = PRIMARYKEY_TIMESTAMP_COL_INDEX, .type = TSDB_DATA_TYPE_TIMESTAMP, .bytes = TSDB_KEYSIZE};
}
@@ -9639,15 +9700,15 @@ int32_t tscGetExprFilters(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pSelect
//
// if (tSqlExprCompare(pItem->pNode, pSqlExpr) == 0) { // exists, not added it,
//
-// SColumnIndex index = COLUMN_INDEX_INITIALIZER;
+// SColumnIndex idx = COLUMN_INDEX_INITIALIZER;
// int32_t functionId = pSqlExpr->functionId;
// if (pSqlExpr->Expr.paramList == NULL) {
-// index.columnIndex = 0;
-// index.tableIndex = 0;
+// idx.columnIndex = 0;
+// idx.tableIndex = 0;
// } else {
// tSqlExprItem* pParamElem = taosArrayGet(pSqlExpr->Expr.paramList, 0);
// SStrToken* pToken = &pParamElem->pNode->columnName;
-// getColumnIndexByName(pToken, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd));
+// getColumnIndexByName(pToken, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd));
// }
//
// size_t numOfNodeInSel = tscNumOfExprs(pQueryInfo);
@@ -9658,7 +9719,7 @@ int32_t tscGetExprFilters(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pSelect
// continue;
// }
//
-// if (pExpr1->base.colInfo.colIndex != index.columnIndex) {
+// if (pExpr1->base.colInfo.colIndex != idx.columnIndex) {
// continue;
// }
//
@@ -9847,16 +9908,16 @@ int32_t getHavingExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pSelectNode
}
if (pExpr1->tokenId == TK_ID) {
- SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if ((getColumnIndexByName(&pExpr1->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS)) {
+ SColumnIndex idx = COLUMN_INDEX_INITIALIZER;
+ if ((getColumnIndexByName(&pExpr1->columnName, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
- STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
+ STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, idx.tableIndex);
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
- if (index.columnIndex <= 0 ||
- index.columnIndex >= tscGetNumOfColumns(pTableMeta)) {
+ if (idx.columnIndex <= 0 ||
+ idx.columnIndex >= tscGetNumOfColumns(pTableMeta)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
}
@@ -10318,8 +10379,8 @@ static STableMeta* extractTempTableMetaFromSubquery(SQueryInfo* pUpstream) {
return meta;
}
-static int32_t doValidateSubquery(SSqlNode* pSqlNode, int32_t index, SSqlObj* pSql, SQueryInfo* pQueryInfo, char* msgBuf) {
- SRelElementPair* subInfo = taosArrayGet(pSqlNode->from->list, index);
+static int32_t doValidateSubquery(SSqlNode* pSqlNode, int32_t idx, SSqlObj* pSql, SQueryInfo* pQueryInfo, char* msgBuf) {
+ SRelElementPair* subInfo = taosArrayGet(pSqlNode->from->list, idx);
// union all is not supported currently
SSqlNode* p = taosArrayGetP(subInfo->pSubquery, 0);
@@ -10776,15 +10837,15 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pS
}
return ret;
} else if (pSqlExpr->type == SQL_NODE_TABLE_COLUMN) { // column name, normal column arithmetic expression
- SColumnIndex index = COLUMN_INDEX_INITIALIZER;
+ SColumnIndex idx = COLUMN_INDEX_INITIALIZER;
- int32_t ret = getColumnIndexByName(&pSqlExpr->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd));
+ int32_t ret = getColumnIndexByName(&pSqlExpr->columnName, pQueryInfo, &idx, tscGetErrorMsgPayload(pCmd));
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
- pQueryInfo->curTableIdx = index.tableIndex;
- STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, index.tableIndex)->pTableMeta;
+ pQueryInfo->curTableIdx = idx.tableIndex;
+ STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, idx.tableIndex)->pTableMeta;
int32_t numOfColumns = tscGetNumOfColumns(pTableMeta);
*pExpr = calloc(1, sizeof(tExprNode));
@@ -10793,14 +10854,14 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pS
SSchema* pSchema = NULL;
- if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
+ if (idx.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
pSchema = (*pExpr)->pSchema;
strcpy(pSchema->name, tGetTbnameColumnSchema()->name);
pSchema->type = tGetTbnameColumnSchema()->type;
pSchema->colId = tGetTbnameColumnSchema()->colId;
pSchema->bytes = tGetTbnameColumnSchema()->bytes;
} else {
- pSchema = tscGetTableColumnSchema(pTableMeta, index.columnIndex);
+ pSchema = tscGetTableColumnSchema(pTableMeta, idx.columnIndex);
*(*pExpr)->pSchema = *pSchema;
}
@@ -10808,8 +10869,8 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pS
SColIndex colIndex = {0};
tstrncpy(colIndex.name, pSchema->name, sizeof(colIndex.name));
colIndex.colId = pSchema->colId;
- colIndex.colIndex = index.columnIndex;
- colIndex.flag = (index.columnIndex >= numOfColumns) ? 1 : 0;
+ colIndex.colIndex = idx.columnIndex;
+ colIndex.flag = (idx.columnIndex >= numOfColumns) ? 1 : 0;
taosArrayPush(pCols, &colIndex);
}
diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c
index e6d5a94f04aa3e239ba30ca7e5efd0d72fe9c630..b626a5355e6448b103080cbf2bf6e09c1eaf14e3 100644
--- a/src/client/src/tscServer.c
+++ b/src/client/src/tscServer.c
@@ -854,13 +854,13 @@ static char *doSerializeTableInfo(SQueryTableMsg *pQueryMsg, SSqlObj *pSql, STab
int32_t vgId = -1;
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
- int32_t index = pTableMetaInfo->vgroupIndex;
- assert(index >= 0);
+ int32_t idx = pTableMetaInfo->vgroupIndex;
+ assert(idx >= 0);
SVgroupMsg* pVgroupInfo = NULL;
if (pTableMetaInfo->vgroupList && pTableMetaInfo->vgroupList->numOfVgroups > 0) {
- assert(index < pTableMetaInfo->vgroupList->numOfVgroups);
- pVgroupInfo = &pTableMetaInfo->vgroupList->vgroups[index];
+ assert(idx < pTableMetaInfo->vgroupList->numOfVgroups);
+ pVgroupInfo = &pTableMetaInfo->vgroupList->vgroups[idx];
} else {
tscError("0x%"PRIx64" No vgroup info found", pSql->self);
@@ -870,7 +870,7 @@ static char *doSerializeTableInfo(SQueryTableMsg *pQueryMsg, SSqlObj *pSql, STab
vgId = pVgroupInfo->vgId;
tscSetDnodeEpSet(&pSql->epSet, pVgroupInfo);
- tscDebug("0x%"PRIx64" query on stable, vgIndex:%d, numOfVgroups:%d", pSql->self, index, pTableMetaInfo->vgroupList->numOfVgroups);
+ tscDebug("0x%"PRIx64" query on stable, vgIndex:%d, numOfVgroups:%d", pSql->self, idx, pTableMetaInfo->vgroupList->numOfVgroups);
} else {
vgId = pTableMeta->vgId;
@@ -892,11 +892,11 @@ static char *doSerializeTableInfo(SQueryTableMsg *pQueryMsg, SSqlObj *pSql, STab
pQueryMsg->numOfTables = htonl(1); // set the number of tables
pMsg += sizeof(STableIdInfo);
} else { // it is a subquery of the super table query, this EP info is acquired from vgroupInfo
- int32_t index = pTableMetaInfo->vgroupIndex;
+ int32_t idx = pTableMetaInfo->vgroupIndex;
int32_t numOfVgroups = (int32_t)taosArrayGetSize(pTableMetaInfo->pVgroupTables);
- assert(index >= 0 && index < numOfVgroups);
+ assert(idx >= 0 && idx < numOfVgroups);
- SVgroupTableInfo* pTableIdList = taosArrayGet(pTableMetaInfo->pVgroupTables, index);
+ SVgroupTableInfo* pTableIdList = taosArrayGet(pTableMetaInfo->pVgroupTables, idx);
// set the vgroup info
tscSetDnodeEpSet(&pSql->epSet, &pTableIdList->vgInfo);
@@ -906,7 +906,7 @@ static char *doSerializeTableInfo(SQueryTableMsg *pQueryMsg, SSqlObj *pSql, STab
pQueryMsg->numOfTables = htonl(numOfTables); // set the number of tables
tscDebug("0x%"PRIx64" query on stable, vgId:%d, numOfTables:%d, vgIndex:%d, numOfVgroups:%d", pSql->self,
- pTableIdList->vgInfo.vgId, numOfTables, index, numOfVgroups);
+ pTableIdList->vgInfo.vgId, numOfTables, idx, numOfVgroups);
// serialize each table id info
for(int32_t i = 0; i < numOfTables; ++i) {
@@ -1219,7 +1219,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pQueryMsg->tsBuf.tsOffset = htonl((int32_t)(pMsg - pCmd->payload));
if (pQueryInfo->tsBuf != NULL) {
- // note: here used the index instead of actual vnode id.
+ // note: the idx is used here instead of the actual vnode id.
int32_t vnodeIndex = pTableMetaInfo->vgroupIndex;
code = dumpFileBlockByGroupId(pQueryInfo->tsBuf, vnodeIndex, pMsg, &pQueryMsg->tsBuf.tsLen, &pQueryMsg->tsBuf.tsNumOfBlocks);
if (code != TSDB_CODE_SUCCESS) {
@@ -1364,10 +1364,10 @@ int32_t tscBuildCreateDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
}
static bool tscIsAlterCommand(char* sqlstr) {
- int32_t index = 0;
+ int32_t idx = 0;
do {
- SStrToken t0 = tStrGetToken(sqlstr, &index, false);
+ SStrToken t0 = tStrGetToken(sqlstr, &idx, false);
if (t0.type != TK_LP) {
return t0.type == TK_ALTER;
}
@@ -2749,18 +2749,18 @@ int tscProcessShowRsp(SSqlObj *pSql) {
SFieldInfo* pFieldInfo = &pQueryInfo->fieldsInfo;
- SColumnIndex index = {0};
+ SColumnIndex idx = {0};
pSchema = pMetaMsg->schema;
uint64_t uid = pTableMetaInfo->pTableMeta->id.uid;
for (int16_t i = 0; i < pMetaMsg->numOfColumns; ++i, ++pSchema) {
- index.columnIndex = i;
+ idx.columnIndex = i;
tscColumnListInsert(pQueryInfo->colList, i, uid, pSchema);
TAOS_FIELD f = tscCreateField(pSchema->type, pSchema->name, pSchema->bytes);
SInternalField* pInfo = tscFieldInfoAppend(pFieldInfo, &f);
- pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index,
+ pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &idx,
pTableSchema[i].type, pTableSchema[i].bytes, getNewResColId(pCmd), pTableSchema[i].bytes, false);
}
@@ -3587,4 +3587,4 @@ void tscInitMsgsFp() {
tscKeepConn[TSDB_SQL_SELECT] = 1;
tscKeepConn[TSDB_SQL_FETCH] = 1;
tscKeepConn[TSDB_SQL_HB] = 1;
-}
\ No newline at end of file
+}
diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c
index 3fadc7abaf0aa2ae96e87cae77c6ce1a2b3df742..5f1d996b54b76cf922510fccf36dc913f117a47a 100644
--- a/src/client/src/tscSubquery.c
+++ b/src/client/src/tscSubquery.c
@@ -30,7 +30,7 @@
typedef struct SInsertSupporter {
SSqlObj* pSql;
- int32_t index;
+ int32_t idx;
} SInsertSupporter;
static void freeJoinSubqueryObj(SSqlObj* pSql);
@@ -84,14 +84,14 @@ static bool allSubqueryDone(SSqlObj *pParentSql) {
for (int i = 0; i < subState->numOfSub; i++) {
SSqlObj* pSub = pParentSql->pSubs[i];
if (0 == subState->states[i]) {
- tscDebug("0x%"PRIx64" subquery:0x%"PRIx64", index: %d NOT finished yet", pParentSql->self, pSub->self, i);
+ tscDebug("0x%"PRIx64" subquery:0x%"PRIx64", idx: %d NOT finished yet", pParentSql->self, pSub->self, i);
done = false;
break;
} else {
if (pSub != NULL) {
- tscDebug("0x%"PRIx64" subquery:0x%"PRIx64", index: %d finished", pParentSql->self, pSub->self, i);
+ tscDebug("0x%"PRIx64" subquery:0x%"PRIx64", idx: %d finished", pParentSql->self, pSub->self, i);
} else {
- tscDebug("0x%"PRIx64" subquery:%p, index: %d finished", pParentSql->self, pSub, i);
+ tscDebug("0x%"PRIx64" subquery:%p, idx: %d finished", pParentSql->self, pSub, i);
}
}
}
@@ -105,7 +105,7 @@ bool subAndCheckDone(SSqlObj *pSql, SSqlObj *pParentSql, int idx) {
pthread_mutex_lock(&subState->mutex);
- tscDebug("0x%"PRIx64" subquery:0x%"PRIx64", index:%d state set to 1", pParentSql->self, pSql->self, idx);
+ tscDebug("0x%"PRIx64" subquery:0x%"PRIx64", idx:%d state set to 1", pParentSql->self, pSql->self, idx);
subState->states[idx] = 1;
bool done = allSubqueryDone(pParentSql);
@@ -383,7 +383,7 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) {
// todo handle failed to create sub query
-SJoinSupporter* tscCreateJoinSupporter(SSqlObj* pSql, int32_t index) {
+SJoinSupporter* tscCreateJoinSupporter(SSqlObj* pSql, int32_t idx) {
SJoinSupporter* pSupporter = calloc(1, sizeof(SJoinSupporter));
if (pSupporter == NULL) {
return NULL;
@@ -391,7 +391,7 @@ SJoinSupporter* tscCreateJoinSupporter(SSqlObj* pSql, int32_t index) {
pSupporter->pObj = pSql->self;
- pSupporter->subqueryIndex = index;
+ pSupporter->subqueryIndex = idx;
SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
memcpy(&pSupporter->interval, &pQueryInfo->interval, sizeof(pSupporter->interval));
@@ -403,7 +403,7 @@ SJoinSupporter* tscCreateJoinSupporter(SSqlObj* pSql, int32_t index) {
pSupporter->numOfFillVal = pQueryInfo->numOfFillVal;
}
- STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, index);
+ STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, idx);
pSupporter->uid = pTableMetaInfo->pTableMeta->id.uid;
assert (pSupporter->uid != 0);
@@ -614,7 +614,7 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
* during the timestamp intersection.
*/
pSupporter->limit = pQueryInfo->limit;
- SColumnIndex index = {.tableIndex = 0, .columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX};
+ SColumnIndex idx = {.tableIndex = 0, .columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX};
SSchema* s = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, 0);
SExprInfo* pExpr = tscExprGet(pQueryInfo, 0);
@@ -626,7 +626,7 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
int16_t functionId = tscIsProjectionQuery(pQueryInfo)? TSDB_FUNC_PRJ : TSDB_FUNC_TS;
- tscAddFuncInSelectClause(pQueryInfo, 0, functionId, &index, s, TSDB_COL_NORMAL, getNewResColId(&pNew->cmd));
+ tscAddFuncInSelectClause(pQueryInfo, 0, functionId, &idx, s, TSDB_COL_NORMAL, getNewResColId(&pNew->cmd));
tscPrintSelNodeList(pNew, 0);
tscFieldInfoUpdateOffset(pQueryInfo);
@@ -836,8 +836,8 @@ static void issueTsCompQuery(SSqlObj* pSql, SJoinSupporter* pSupporter, SSqlObj*
SSchema colSchema = {.type = TSDB_DATA_TYPE_BINARY, .bytes = 1};
- SColumnIndex index = {0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
- SExprInfo *pExpr = tscAddFuncInSelectClause(pQueryInfo, 0, TSDB_FUNC_TS_COMP, &index, &colSchema, TSDB_COL_NORMAL, getNewResColId(pCmd));
+ SColumnIndex idx = {0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
+ SExprInfo *pExpr = tscAddFuncInSelectClause(pQueryInfo, 0, TSDB_FUNC_TS_COMP, &idx, &colSchema, TSDB_COL_NORMAL, getNewResColId(pCmd));
// set the tags value for ts_comp function
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
@@ -1280,7 +1280,7 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
// todo retry if other subqueries are not failed
assert(numOfRows < 0 && numOfRows == taos_errno(pSql));
- tscError("0x%"PRIx64" sub query failed, code:%s, index:%d", pSql->self, tstrerror(numOfRows), pSupporter->subqueryIndex);
+ tscError("0x%"PRIx64" sub query failed, code:%s, idx:%d", pSql->self, tstrerror(numOfRows), pSupporter->subqueryIndex);
pParentSql->res.code = numOfRows;
if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
@@ -1336,7 +1336,7 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
pTableMetaInfo->vgroupIndex += 1;
assert(pTableMetaInfo->vgroupIndex < totalVgroups);
- tscDebug("0x%"PRIx64" tid_tag from vgroup index:%d completed, try next vgroup:%d. total vgroups:%d. current numOfRes:%d",
+ tscDebug("0x%"PRIx64" tid_tag from vgroup idx:%d completed, try next vgroup:%d. total vgroups:%d. current numOfRes:%d",
pSql->self, pTableMetaInfo->vgroupIndex - 1, pTableMetaInfo->vgroupIndex, totalVgroups, pSupporter->num);
pCmd->command = TSDB_SQL_SELECT;
@@ -1447,7 +1447,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
if (taos_errno(pSql) != TSDB_CODE_SUCCESS) {
// todo retry if other subqueries are not failed yet
assert(numOfRows < 0 && numOfRows == taos_errno(pSql));
- tscError("0x%"PRIx64" sub query failed, code:%s, index:%d", pSql->self, tstrerror(numOfRows), pSupporter->subqueryIndex);
+ tscError("0x%"PRIx64" sub query failed, code:%s, idx:%d", pSql->self, tstrerror(numOfRows), pSupporter->subqueryIndex);
pParentSql->res.code = numOfRows;
if (quitAllSubquery(pSql, pParentSql, pSupporter)){
@@ -1525,7 +1525,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
pTableMetaInfo->vgroupIndex += 1;
assert(pTableMetaInfo->vgroupIndex < totalVgroups);
- tscDebug("0x%"PRIx64" results from vgroup index:%d completed, try next vgroup:%d. total vgroups:%d. current numOfRes:%" PRId64,
+ tscDebug("0x%"PRIx64" results from vgroup idx:%d completed, try next vgroup:%d. total vgroups:%d. current numOfRes:%" PRId64,
pSql->self, pTableMetaInfo->vgroupIndex - 1, pTableMetaInfo->vgroupIndex, totalVgroups,
pRes->numOfClauseTotal);
@@ -1610,7 +1610,7 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR
assert(numOfRows == taos_errno(pSql));
pParentSql->res.code = numOfRows;
- tscError("0x%"PRIx64" retrieve failed, index:%d, code:%s", pSql->self, pSupporter->subqueryIndex, tstrerror(numOfRows));
+ tscError("0x%"PRIx64" retrieve failed, idx:%d, code:%s", pSql->self, pSupporter->subqueryIndex, tstrerror(numOfRows));
tscAsyncResultOnError(pParentSql);
goto _return;
@@ -1670,7 +1670,7 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR
pParentSql->res.precision = pRes1->precision;
if (pRes1->row > 0 && pRes1->numOfRows > 0) {
- tscDebug("0x%"PRIx64" sub:0x%"PRIx64" index:%d numOfRows:%d total:%"PRId64 " (not retrieve)", pParentSql->self,
+ tscDebug("0x%"PRIx64" sub:0x%"PRIx64" idx:%d numOfRows:%d total:%"PRId64 " (not retrieve)", pParentSql->self,
pParentSql->pSubs[i]->self, i, pRes1->numOfRows, pRes1->numOfTotal);
assert(pRes1->row < pRes1->numOfRows || (pRes1->row == pRes1->numOfRows && pRes1->completed));
} else {
@@ -1678,7 +1678,7 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR
pRes1->numOfClauseTotal += pRes1->numOfRows;
}
- tscDebug("0x%"PRIx64" sub:0x%"PRIx64" index:%d numOfRows:%d total:%"PRId64, pParentSql->self,
+ tscDebug("0x%"PRIx64" sub:0x%"PRIx64" idx:%d numOfRows:%d total:%"PRId64, pParentSql->self,
pParentSql->pSubs[i]->self, i, pRes1->numOfRows, pRes1->numOfTotal);
}
}
@@ -1879,7 +1879,7 @@ void tscFetchDatablockForSubquery(SSqlObj* pSql) {
}
}
-// all subqueries return, set the result output index
+// all subqueries return, set the result output idx
void tscSetupOutputColumnIndex(SSqlObj* pSql) {
SSqlCmd* pCmd = &pSql->cmd;
SSqlRes* pRes = &pSql->res;
@@ -2567,7 +2567,7 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) {
int32_t numOfExprs = (int32_t) tscNumOfExprs(pQueryInfo);
- int32_t index = 0;
+ int32_t idx = 0;
for(int32_t i = 0; i < numOfExprs; ++i) {
SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
if (pExpr->base.functionId == TSDB_FUNC_TS && pQueryInfo->interval.interval > 0) {
@@ -2576,7 +2576,7 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) {
SColumnIndex colIndex = {.tableIndex = 0, .columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX};
SSchema* schema = tscGetColumnSchemaById(pTableMetaInfo1->pTableMeta, pExpr->base.colInfo.colId);
- SExprInfo* p = tscAddFuncInSelectClause(pNewQueryInfo, index++, TSDB_FUNC_TS, &colIndex, schema, TSDB_COL_NORMAL, getNewResColId(pCmd));
+ SExprInfo* p = tscAddFuncInSelectClause(pNewQueryInfo, idx++, TSDB_FUNC_TS, &colIndex, schema, TSDB_COL_NORMAL, getNewResColId(pCmd));
p->base.resColId = pExpr->base.resColId; // update the result column id
} else if (pExpr->base.functionId == TSDB_FUNC_STDDEV_DST) {
taosArrayPush(pSup->pColsInfo, &pExpr->base.resColId);
@@ -2585,7 +2585,7 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) {
SSchema schema = {.type = TSDB_DATA_TYPE_DOUBLE, .bytes = sizeof(double)};
tstrncpy(schema.name, pExpr->base.aliasName, tListLen(schema.name));
- SExprInfo* p = tscAddFuncInSelectClause(pNewQueryInfo, index++, TSDB_FUNC_AVG, &colIndex, &schema, TSDB_COL_NORMAL, getNewResColId(pCmd));
+ SExprInfo* p = tscAddFuncInSelectClause(pNewQueryInfo, idx++, TSDB_FUNC_AVG, &colIndex, &schema, TSDB_COL_NORMAL, getNewResColId(pCmd));
p->base.resColId = pExpr->base.resColId; // update the result column id
} else if (pExpr->base.functionId == TSDB_FUNC_TAG) {
pSup->tagLen += pExpr->base.resBytes;
@@ -2598,7 +2598,7 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) {
schema = tGetTbnameColumnSchema();
}
- SExprInfo* p = tscAddFuncInSelectClause(pNewQueryInfo, index++, TSDB_FUNC_TAG, &colIndex, schema, TSDB_COL_TAG, getNewResColId(pCmd));
+ SExprInfo* p = tscAddFuncInSelectClause(pNewQueryInfo, idx++, TSDB_FUNC_TAG, &colIndex, schema, TSDB_COL_TAG, getNewResColId(pCmd));
if (schema->type == TSDB_DATA_TYPE_JSON){
p->base.numOfParams = pExpr->base.numOfParams;
tVariantAssign(&p->base.param[0], &pExpr->base.param[0]);
@@ -2616,7 +2616,7 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) {
SSchema* schema = tscGetColumnSchemaById(pTableMetaInfo1->pTableMeta, pExpr->base.colInfo.colId);
//doLimitOutputNormalColOfGroupby
- SExprInfo* p = tscAddFuncInSelectClause(pNewQueryInfo, index++, TSDB_FUNC_PRJ, &colIndex, schema, TSDB_COL_NORMAL, getNewResColId(pCmd));
+ SExprInfo* p = tscAddFuncInSelectClause(pNewQueryInfo, idx++, TSDB_FUNC_PRJ, &colIndex, schema, TSDB_COL_NORMAL, getNewResColId(pCmd));
p->base.numOfParams = 1;
p->base.param[0].i64 = 1;
p->base.param[0].nType = TSDB_DATA_TYPE_INT;
@@ -2658,7 +2658,7 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) {
"0x%"PRIx64" first round subquery:0x%"PRIx64" tableIndex:%d, vgroupIndex:%d, numOfVgroups:%d, type:%d, query to retrieve timestamps, "
"numOfExpr:%" PRIzu ", colList:%d, numOfOutputFields:%d, name:%s",
pSql->self, pNew->self, 0, pTableMetaInfo->vgroupIndex, pTableMetaInfo->vgroupList->numOfVgroups, pNewQueryInfo->type,
- tscNumOfExprs(pNewQueryInfo), index+1, pNewQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pTableMetaInfo->name));
+ tscNumOfExprs(pNewQueryInfo), idx+1, pNewQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pTableMetaInfo->name));
pSql->pSubs = calloc(1, POINTER_BYTES);
if (pSql->pSubs == NULL) {
@@ -3281,7 +3281,7 @@ SSqlObj *tscCreateSTableSubquery(SSqlObj *pSql, SRetrieveSupport *trsupport, SSq
assert(trsupport->subqueryIndex < pSql->subState.numOfSub);
- // launch subquery for each vnode, so the subquery index equals to the vgroupIndex.
+ // launch subquery for each vnode, so the subquery idx equals to the vgroupIndex.
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, table_index);
pTableMetaInfo->vgroupIndex = trsupport->subqueryIndex;
@@ -3411,7 +3411,7 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
}
}
- if (!subAndCheckDone(tres, pParentObj, pSupporter->index)) {
+ if (!subAndCheckDone(tres, pParentObj, pSupporter->idx)) {
// concurrency problem, other thread already release pParentObj
//tscDebug("0x%"PRIx64" insert:%p,%d completed, total:%d", pParentObj->self, tres, suppIdx, pParentObj->subState.numOfSub);
return;
@@ -3495,9 +3495,9 @@ int32_t tscHandleInsertRetry(SSqlObj* pParent, SSqlObj* pSql) {
SSqlRes* pRes = &pSql->res;
SInsertSupporter* pSupporter = (SInsertSupporter*) pSql->param;
- assert(pSupporter->index < pSupporter->pSql->subState.numOfSub);
+ assert(pSupporter->idx < pSupporter->pSql->subState.numOfSub);
- STableDataBlocks* pTableDataBlock = taosArrayGetP(pParent->cmd.insertParam.pDataBlocks, pSupporter->index);
+ STableDataBlocks* pTableDataBlock = taosArrayGetP(pParent->cmd.insertParam.pDataBlocks, pSupporter->idx);
int32_t code = tscCopyDataBlockToPayload(pSql, pTableDataBlock);
if ((pRes->code = code)!= TSDB_CODE_SUCCESS) {
@@ -3524,7 +3524,7 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
SSqlObj* pSub = pSql->pSubs[i];
SInsertSupporter* pSup = calloc(1, sizeof(SInsertSupporter));
- pSup->index = i;
+ pSup->idx = i;
pSup->pSql = pSql;
pSub->param = pSup;
@@ -3572,7 +3572,7 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
}
pSupporter->pSql = pSql;
- pSupporter->index = numOfSub;
+ pSupporter->idx = numOfSub;
SSqlObj *pNew = createSimpleSubObj(pSql, multiVnodeInsertFinalize, pSupporter, TSDB_SQL_INSERT);
if (pNew == NULL) {
@@ -3763,19 +3763,19 @@ void tscBuildResFromSubqueries(SSqlObj *pSql) {
char * getScalarExprInputSrc(void *param, const char *name, int32_t colId) {
SScalarExprSupport*pSupport = (SScalarExprSupport*) param;
- int32_t index = -1;
+ int32_t idx = -1;
SExprInfo* pExpr = NULL;
for (int32_t i = 0; i < pSupport->numOfCols; ++i) {
pExpr = taosArrayGetP(pSupport->exprList, i);
if (strncmp(name, pExpr->base.aliasName, sizeof(pExpr->base.aliasName) - 1) == 0) {
- index = i;
+ idx = i;
break;
}
}
- assert(index >= 0 && index < pSupport->numOfCols);
- return pSupport->data[index] + pSupport->offset * pExpr->base.resBytes;
+ assert(idx >= 0 && idx < pSupport->numOfCols);
+ return pSupport->data[idx] + pSupport->offset * pExpr->base.resBytes;
}
TAOS_ROW doSetResultRowData(SSqlObj *pSql) {
@@ -3815,7 +3815,7 @@ TAOS_ROW doSetResultRowData(SSqlObj *pSql) {
j += 1;
}
- pRes->row++; // index increase one-step
+ pRes->row++; // idx increase one-step
return pRes->tsrow;
}
@@ -3959,7 +3959,7 @@ void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGr
pthread_mutex_init(&pQInfo->lock, NULL);
tsem_init(&pQInfo->ready, 0, 0);
- int32_t index = 0;
+ int32_t idx = 0;
for(int32_t i = 0; i < numOfGroups; ++i) {
SArray* pa = taosArrayGetP(pQueryAttr->tableGroupInfo.pGroupList, i);
@@ -3976,7 +3976,7 @@ void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGr
STableKeyInfo* info = taosArrayGet(pa, j);
window.skey = info->lastKey;
- void* buf = (char*) pQInfo->pBuf + index * sizeof(STableQueryInfo);
+ void* buf = (char*) pQInfo->pBuf + idx * sizeof(STableQueryInfo);
STableQueryInfo* item = createTableQueryInfo(pQueryAttr, info->pTable, pQueryAttr->groupbyColumn, window, buf);
if (item == NULL) {
goto _cleanup;
@@ -3987,7 +3987,7 @@ void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGr
STableId id = {.tid = 0, .uid = 0};
taosHashPut(pRuntimeEnv->tableqinfoGroupInfo.map, &id.tid, sizeof(id.tid), &item, POINTER_BYTES);
- index += 1;
+ idx += 1;
}
}
diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c
index d5369e38f0eb0a64a375d4a30fc05173c6a6aafd..944b85e996db3364181d9a2ce4132e827cc3f406 100644
--- a/src/client/src/tscSystem.c
+++ b/src/client/src/tscSystem.c
@@ -47,7 +47,7 @@ int32_t tscNumOfObj = 0; // number of sqlObj in current process.
static void *tscCheckDiskUsageTmr;
void *tscRpcCache; // cache to keep rpc obj
int32_t tscNumOfThreads = 1; // num of rpc threads
-char tscLogFileName[12] = "taoslog";
+char tscLogFileName[] = "taoslog";
int tscLogFileNum = 10;
static pthread_mutex_t rpcObjMutex; // mutex to protect open the rpc obj concurrently
@@ -87,24 +87,24 @@ int32_t tscAcquireRpc(const char *key, const char *user, const char *secretEncry
return 0;
}
- SRpcInit rpcInit;
- memset(&rpcInit, 0, sizeof(rpcInit));
- rpcInit.localPort = 0;
- rpcInit.label = "TSC";
- rpcInit.numOfThreads = tscNumOfThreads;
- rpcInit.cfp = tscProcessMsgFromServer;
- rpcInit.sessions = tsMaxConnections;
- rpcInit.connType = TAOS_CONN_CLIENT;
- rpcInit.user = (char *)user;
- rpcInit.idleTime = tsShellActivityTimer * 1000;
- rpcInit.ckey = "key";
- rpcInit.spi = 1;
- rpcInit.secret = (char *)secretEncrypt;
+ SRpcInit rpcInitial;
+ memset(&rpcInitial, 0, sizeof(rpcInitial));
+ rpcInitial.localPort = 0;
+ rpcInitial.label = "TSC";
+ rpcInitial.numOfThreads = tscNumOfThreads;
+ rpcInitial.cfp = tscProcessMsgFromServer;
+ rpcInitial.sessions = tsMaxConnections;
+ rpcInitial.connType = TAOS_CONN_CLIENT;
+ rpcInitial.user = (char *)user;
+ rpcInitial.idleTime = tsShellActivityTimer * 1000;
+ rpcInitial.ckey = "key";
+ rpcInitial.spi = 1;
+ rpcInitial.secret = (char *)secretEncrypt;
SRpcObj rpcObj;
memset(&rpcObj, 0, sizeof(rpcObj));
tstrncpy(rpcObj.key, key, sizeof(rpcObj.key));
- rpcObj.pDnodeConn = rpcOpen(&rpcInit);
+ rpcObj.pDnodeConn = rpcOpen(&rpcInitial);
if (rpcObj.pDnodeConn == NULL) {
pthread_mutex_unlock(&rpcObjMutex);
tscError("failed to init connection to server");
diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c
index cfcf2f63eedc4f3dc8b1271e6a3fa47c46244946..c431a731c0dd23db8eaf34ded0715d90fb709819 100644
--- a/src/client/src/tscUtil.c
+++ b/src/client/src/tscUtil.c
@@ -2305,10 +2305,10 @@ void tscCloseTscObj(void *param) {
}
bool tscIsInsertData(char* sqlstr) {
- int32_t index = 0;
+ int32_t idx = 0;
do {
- SStrToken t0 = tStrGetToken(sqlstr, &index, false);
+ SStrToken t0 = tStrGetToken(sqlstr, &idx, false);
if (t0.type != TK_LP) {
return t0.type == TK_INSERT || t0.type == TK_IMPORT;
}
@@ -2378,12 +2378,12 @@ SInternalField* tscFieldInfoAppend(SFieldInfo* pFieldInfo, TAOS_FIELD* pField) {
return taosArrayPush(pFieldInfo->internalField, &info);
}
-SInternalField* tscFieldInfoInsert(SFieldInfo* pFieldInfo, int32_t index, TAOS_FIELD* field) {
+SInternalField* tscFieldInfoInsert(SFieldInfo* pFieldInfo, int32_t idx, TAOS_FIELD* field) {
pFieldInfo->numOfOutput++;
struct SInternalField info = { .pExpr = NULL, .visible = true };
info.field = *field;
- return taosArrayInsert(pFieldInfo->internalField, index, &info);
+ return taosArrayInsert(pFieldInfo->internalField, idx, &info);
}
void tscFieldInfoUpdateOffset(SQueryInfo* pQueryInfo) {
@@ -2398,18 +2398,18 @@ void tscFieldInfoUpdateOffset(SQueryInfo* pQueryInfo) {
}
}
-SInternalField* tscFieldInfoGetInternalField(SFieldInfo* pFieldInfo, int32_t index) {
- assert(index < pFieldInfo->numOfOutput);
- return TARRAY_GET_ELEM(pFieldInfo->internalField, index);
+SInternalField* tscFieldInfoGetInternalField(SFieldInfo* pFieldInfo, int32_t idx) {
+ assert(idx < pFieldInfo->numOfOutput);
+ return TARRAY_GET_ELEM(pFieldInfo->internalField, idx);
}
-TAOS_FIELD* tscFieldInfoGetField(SFieldInfo* pFieldInfo, int32_t index) {
- assert(index < pFieldInfo->numOfOutput);
- return &((SInternalField*)TARRAY_GET_ELEM(pFieldInfo->internalField, index))->field;
+TAOS_FIELD* tscFieldInfoGetField(SFieldInfo* pFieldInfo, int32_t idx) {
+ assert(idx < pFieldInfo->numOfOutput);
+ return &((SInternalField*)TARRAY_GET_ELEM(pFieldInfo->internalField, idx))->field;
}
-int32_t tscFieldInfoGetOffset(SQueryInfo* pQueryInfo, int32_t index) {
- SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, index);
+int32_t tscFieldInfoGetOffset(SQueryInfo* pQueryInfo, int32_t idx) {
+ SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, idx);
assert(pInfo != NULL && pInfo->pExpr->pExpr == NULL);
return pInfo->pExpr->base.offset;
@@ -2635,16 +2635,16 @@ SExprInfo* tscExprCreate(STableMetaInfo* pTableMetaInfo, int16_t functionId, SCo
return pExpr;
}
-SExprInfo* tscExprInsert(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
+SExprInfo* tscExprInsert(SQueryInfo* pQueryInfo, int32_t idx, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
int16_t size, int16_t resColId, int32_t interSize, bool isTagCol) {
int32_t num = (int32_t)taosArrayGetSize(pQueryInfo->exprList);
- if (index == num) {
+ if (idx == num) {
return tscExprAppend(pQueryInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol);
}
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, pColIndex->tableIndex);
SExprInfo* pExpr = tscExprCreate(pTableMetaInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol);
- taosArrayInsert(pQueryInfo->exprList, index, &pExpr);
+ taosArrayInsert(pQueryInfo->exprList, idx, &pExpr);
return pExpr;
}
@@ -2656,10 +2656,10 @@ SExprInfo* tscExprAppend(SQueryInfo* pQueryInfo, int16_t functionId, SColumnInde
return pExpr;
}
-SExprInfo* tscExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, int16_t srcColumnIndex,
+SExprInfo* tscExprUpdate(SQueryInfo* pQueryInfo, int32_t idx, int16_t functionId, int16_t srcColumnIndex,
int16_t type, int32_t size) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
- SExprInfo* pExpr = tscExprGet(pQueryInfo, index);
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, idx);
if (pExpr == NULL) {
return NULL;
}
@@ -2676,8 +2676,8 @@ SExprInfo* tscExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t function
return pExpr;
}
-bool tscMultiRoundQuery(SQueryInfo* pQueryInfo, int32_t index) {
- if (!UTIL_TABLE_IS_SUPER_TABLE(pQueryInfo->pTableMetaInfo[index])) {
+bool tscMultiRoundQuery(SQueryInfo* pQueryInfo, int32_t idx) {
+ if (!UTIL_TABLE_IS_SUPER_TABLE(pQueryInfo->pTableMetaInfo[idx])) {
return false;
}
@@ -2725,8 +2725,8 @@ void tscExprAddParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t byt
assert(pExpr->numOfParams <= 3);
}
-SExprInfo* tscExprGet(SQueryInfo* pQueryInfo, int32_t index) {
- return taosArrayGetP(pQueryInfo->exprList, index);
+SExprInfo* tscExprGet(SQueryInfo* pQueryInfo, int32_t idx) {
+ return taosArrayGetP(pQueryInfo->exprList, idx);
}
/*
@@ -3014,6 +3014,10 @@ int32_t tscValidateName(SStrToken* pToken, bool escapeEnabled, bool *dbIncluded)
// single token, validate it
if (len == pToken->n) {
+ if (taosIsKeyWordToken(pToken->z, (int32_t) pToken->n)) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
+
return validateQuoteToken(pToken, escapeEnabled, NULL);
} else {
sep = strnchr(pToken->z, TS_PATH_DELIMITER[0], pToken->n, true);
@@ -3297,8 +3301,8 @@ void tscGetSrcColumnInfo(SSrcColumnInfo* pColInfo, SQueryInfo* pQueryInfo) {
if (TSDB_COL_IS_TAG(pExpr->base.colInfo.flag)) {
SSchema* pTagSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta);
- int16_t index = pExpr->base.colInfo.colIndex;
- pColInfo[i].type = (index != -1) ? pTagSchema[index].type : TSDB_DATA_TYPE_BINARY;
+ int16_t idx = pExpr->base.colInfo.colIndex;
+ pColInfo[i].type = (idx != -1) ? pTagSchema[idx].type : TSDB_DATA_TYPE_BINARY;
} else {
pColInfo[i].type = pSchema[pExpr->base.colInfo.colIndex].type;
}
@@ -3381,7 +3385,7 @@ SQueryInfo* tscGetQueryInfoS(SSqlCmd* pCmd) {
return pQueryInfo;
}
-STableMetaInfo* tscGetTableMetaInfoByUid(SQueryInfo* pQueryInfo, uint64_t uid, int32_t* index) {
+STableMetaInfo* tscGetTableMetaInfoByUid(SQueryInfo* pQueryInfo, uint64_t uid, int32_t* idx) {
int32_t k = -1;
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
@@ -3391,8 +3395,8 @@ STableMetaInfo* tscGetTableMetaInfoByUid(SQueryInfo* pQueryInfo, uint64_t uid, i
}
}
- if (index != NULL) {
- *index = k;
+ if (idx != NULL) {
+ *idx = k;
}
assert(k != -1);
@@ -3615,19 +3619,19 @@ void tscFreeVgroupTableInfo(SArray* pVgroupTables) {
taosArrayDestroy(&pVgroupTables);
}
-void tscRemoveVgroupTableGroup(SArray* pVgroupTable, int32_t index) {
- assert(pVgroupTable != NULL && index >= 0);
+void tscRemoveVgroupTableGroup(SArray* pVgroupTable, int32_t idx) {
+ assert(pVgroupTable != NULL && idx >= 0);
size_t size = taosArrayGetSize(pVgroupTable);
- assert(size > index);
+ assert(size > idx);
- SVgroupTableInfo* pInfo = taosArrayGet(pVgroupTable, index);
+ SVgroupTableInfo* pInfo = taosArrayGet(pVgroupTable, idx);
// for(int32_t j = 0; j < pInfo->vgInfo.numOfEps; ++j) {
// tfree(pInfo->vgInfo.epAddr[j].fqdn);
// }
taosArrayDestroy(&pInfo->itemList);
- taosArrayRemove(pVgroupTable, index);
+ taosArrayRemove(pVgroupTable, idx);
}
void tscVgroupTableCopy(SVgroupTableInfo* info, SVgroupTableInfo* pInfo) {
@@ -4102,15 +4106,15 @@ static void tscSubqueryRetrieveCallback(void* param, TAOS_RES* tres, int code) {
SSqlObj* pParentSql = ps->pParentSql;
SSqlObj* pSql = tres;
- int32_t index = ps->subqueryIndex;
- bool ret = subAndCheckDone(pSql, pParentSql, index);
+ int32_t idx = ps->subqueryIndex;
+ bool ret = subAndCheckDone(pSql, pParentSql, idx);
// TODO refactor
tfree(ps);
pSql->param = NULL;
if (!ret) {
- tscDebug("0x%"PRIx64" sub:0x%"PRIx64" orderOfSub:%d completed, not all subquery finished", pParentSql->self, pSql->self, index);
+ tscDebug("0x%"PRIx64" sub:0x%"PRIx64" orderOfSub:%d completed, not all subquery finished", pParentSql->self, pSql->self, idx);
return;
}
@@ -4131,13 +4135,13 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) {
if (pSql->res.code != TSDB_CODE_SUCCESS) {
SSqlObj* pParentSql = ps->pParentSql;
- int32_t index = ps->subqueryIndex;
- bool ret = subAndCheckDone(pSql, pParentSql, index);
+ int32_t idx = ps->subqueryIndex;
+ bool ret = subAndCheckDone(pSql, pParentSql, idx);
tscFreeRetrieveSup(&pSql->param);
if (!ret) {
- tscDebug("0x%"PRIx64" sub:0x%"PRIx64" orderOfSub:%d completed, not all subquery finished", pParentSql->self, pSql->self, index);
+ tscDebug("0x%"PRIx64" sub:0x%"PRIx64" orderOfSub:%d completed, not all subquery finished", pParentSql->self, pSql->self, idx);
return;
}
diff --git a/src/common/src/tarithoperator.c b/src/common/src/tarithoperator.c
index ca5d247dd60eb0865c2a02b5d0e911185280f0fa..71702e124913e126df1dd6fbdd202003e5f117e5 100644
--- a/src/common/src/tarithoperator.c
+++ b/src/common/src/tarithoperator.c
@@ -60,40 +60,40 @@ void calc_i32_i32_add(void *left, void *right, int32_t numLeft, int32_t numRight
}
}
-typedef double (*_arithmetic_getVectorDoubleValue_fn_t)(void *src, int32_t index);
+typedef double (*_arithmetic_getVectorDoubleValue_fn_t)(void *src, int32_t idx);
-double getVectorDoubleValue_TINYINT(void *src, int32_t index) {
- return (double)*((int8_t *)src + index);
+double getVectorDoubleValue_TINYINT(void *src, int32_t idx) {
+ return (double)*((int8_t *)src + idx);
}
-double getVectorDoubleValue_UTINYINT(void *src, int32_t index) {
- return (double)*((uint8_t *)src + index);
+double getVectorDoubleValue_UTINYINT(void *src, int32_t idx) {
+ return (double)*((uint8_t *)src + idx);
}
-double getVectorDoubleValue_SMALLINT(void *src, int32_t index) {
- return (double)*((int16_t *)src + index);
+double getVectorDoubleValue_SMALLINT(void *src, int32_t idx) {
+ return (double)*((int16_t *)src + idx);
}
-double getVectorDoubleValue_USMALLINT(void *src, int32_t index) {
- return (double)*((uint16_t *)src + index);
+double getVectorDoubleValue_USMALLINT(void *src, int32_t idx) {
+ return (double)*((uint16_t *)src + idx);
}
-double getVectorDoubleValue_INT(void *src, int32_t index) {
- return (double)*((int32_t *)src + index);
+double getVectorDoubleValue_INT(void *src, int32_t idx) {
+ return (double)*((int32_t *)src + idx);
}
-double getVectorDoubleValue_UINT(void *src, int32_t index) {
- return (double)*((uint32_t *)src + index);
+double getVectorDoubleValue_UINT(void *src, int32_t idx) {
+ return (double)*((uint32_t *)src + idx);
}
-double getVectorDoubleValue_BIGINT(void *src, int32_t index) {
- return (double)*((int64_t *)src + index);
+double getVectorDoubleValue_BIGINT(void *src, int32_t idx) {
+ return (double)*((int64_t *)src + idx);
}
-double getVectorDoubleValue_UBIGINT(void *src, int32_t index) {
- return (double)*((uint64_t *)src + index);
+double getVectorDoubleValue_UBIGINT(void *src, int32_t idx) {
+ return (double)*((uint64_t *)src + idx);
}
-double getVectorDoubleValue_FLOAT(void *src, int32_t index) {
- return (double)*((float *)src + index);
+double getVectorDoubleValue_FLOAT(void *src, int32_t idx) {
+ return (double)*((float *)src + idx);
}
-double getVectorDoubleValue_DOUBLE(void *src, int32_t index) {
- return (double)*((double *)src + index);
+double getVectorDoubleValue_DOUBLE(void *src, int32_t idx) {
+ return (double)*((double *)src + idx);
}
-int64_t getVectorTimestampValue(void *src, int32_t index) {
- return (int64_t)*((int64_t *)src + index);
+int64_t getVectorTimestampValue(void *src, int32_t idx) {
+ return (int64_t)*((int64_t *)src + idx);
}
_arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFn(int32_t srcType) {
_arithmetic_getVectorDoubleValue_fn_t p = NULL;
@@ -124,40 +124,40 @@ _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFn(int32_t srcType) {
}
-typedef void* (*_arithmetic_getVectorValueAddr_fn_t)(void *src, int32_t index);
+typedef void* (*_arithmetic_getVectorValueAddr_fn_t)(void *src, int32_t idx);
-void* getVectorValueAddr_BOOL(void *src, int32_t index) {
- return (void*)((bool *)src + index);
+void* getVectorValueAddr_BOOL(void *src, int32_t idx) {
+ return (void*)((bool *)src + idx);
}
-void* getVectorValueAddr_TINYINT(void *src, int32_t index) {
- return (void*)((int8_t *)src + index);
+void* getVectorValueAddr_TINYINT(void *src, int32_t idx) {
+ return (void*)((int8_t *)src + idx);
}
-void* getVectorValueAddr_UTINYINT(void *src, int32_t index) {
- return (void*)((uint8_t *)src + index);
+void* getVectorValueAddr_UTINYINT(void *src, int32_t idx) {
+ return (void*)((uint8_t *)src + idx);
}
-void* getVectorValueAddr_SMALLINT(void *src, int32_t index) {
- return (void*)((int16_t *)src + index);
+void* getVectorValueAddr_SMALLINT(void *src, int32_t idx) {
+ return (void*)((int16_t *)src + idx);
}
-void* getVectorValueAddr_USMALLINT(void *src, int32_t index) {
- return (void*)((uint16_t *)src + index);
+void* getVectorValueAddr_USMALLINT(void *src, int32_t idx) {
+ return (void*)((uint16_t *)src + idx);
}
-void* getVectorValueAddr_INT(void *src, int32_t index) {
- return (void*)((int32_t *)src + index);
+void* getVectorValueAddr_INT(void *src, int32_t idx) {
+ return (void*)((int32_t *)src + idx);
}
-void* getVectorValueAddr_UINT(void *src, int32_t index) {
- return (void*)((uint32_t *)src + index);
+void* getVectorValueAddr_UINT(void *src, int32_t idx) {
+ return (void*)((uint32_t *)src + idx);
}
-void* getVectorValueAddr_BIGINT(void *src, int32_t index) {
- return (void*)((int64_t *)src + index);
+void* getVectorValueAddr_BIGINT(void *src, int32_t idx) {
+ return (void*)((int64_t *)src + idx);
}
-void* getVectorValueAddr_UBIGINT(void *src, int32_t index) {
- return (void*)((uint64_t *)src + index);
+void* getVectorValueAddr_UBIGINT(void *src, int32_t idx) {
+ return (void*)((uint64_t *)src + idx);
}
-void* getVectorValueAddr_FLOAT(void *src, int32_t index) {
- return (void*)((float *)src + index);
+void* getVectorValueAddr_FLOAT(void *src, int32_t idx) {
+ return (void*)((float *)src + idx);
}
-void* getVectorValueAddr_DOUBLE(void *src, int32_t index) {
- return (void*)((double *)src + index);
+void* getVectorValueAddr_DOUBLE(void *src, int32_t idx) {
+ return (void*)((double *)src + idx);
}
_arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFn(int32_t srcType) {
@@ -474,34 +474,34 @@ void vectorRemainder(void *left, int32_t len1, int32_t _left_type, void *right,
}
}
-typedef int64_t (*_arithmetic_getVectorBigintValue_fn_t)(void *src, int32_t index);
+typedef int64_t (*_arithmetic_getVectorBigintValue_fn_t)(void *src, int32_t idx);
-int64_t getVectorBigintValue_BOOL(void *src, int32_t index) {
- return (int64_t)*((bool *)src + index);
+int64_t getVectorBigintValue_BOOL(void *src, int32_t idx) {
+ return (int64_t)*((bool *)src + idx);
}
-int64_t getVectorBigintValue_TINYINT(void *src, int32_t index) {
- return (int64_t)*((int8_t *)src + index);
+int64_t getVectorBigintValue_TINYINT(void *src, int32_t idx) {
+ return (int64_t)*((int8_t *)src + idx);
}
-int64_t getVectorBigintValue_UTINYINT(void *src, int32_t index) {
- return (int64_t)*((uint8_t *)src + index);
+int64_t getVectorBigintValue_UTINYINT(void *src, int32_t idx) {
+ return (int64_t)*((uint8_t *)src + idx);
}
-int64_t getVectorBigintValue_SMALLINT(void *src, int32_t index) {
- return (int64_t)*((int16_t *)src + index);
+int64_t getVectorBigintValue_SMALLINT(void *src, int32_t idx) {
+ return (int64_t)*((int16_t *)src + idx);
}
-int64_t getVectorBigintValue_USMALLINT(void *src, int32_t index) {
- return (int64_t)*((uint16_t *)src + index);
+int64_t getVectorBigintValue_USMALLINT(void *src, int32_t idx) {
+ return (int64_t)*((uint16_t *)src + idx);
}
-int64_t getVectorBigintValue_INT(void *src, int32_t index) {
- return (int64_t)*((int32_t *)src + index);
+int64_t getVectorBigintValue_INT(void *src, int32_t idx) {
+ return (int64_t)*((int32_t *)src + idx);
}
-int64_t getVectorBigintValue_UINT(void *src, int32_t index) {
- return (int64_t)*((uint32_t *)src + index);
+int64_t getVectorBigintValue_UINT(void *src, int32_t idx) {
+ return (int64_t)*((uint32_t *)src + idx);
}
-int64_t getVectorBigintValue_BIGINT(void *src, int32_t index) {
- return (int64_t)*((int64_t *)src + index);
+int64_t getVectorBigintValue_BIGINT(void *src, int32_t idx) {
+ return (int64_t)*((int64_t *)src + idx);
}
-int64_t getVectorBigintValue_UBIGINT(void *src, int32_t index) {
- return (int64_t)*((uint64_t *)src + index);
+int64_t getVectorBigintValue_UBIGINT(void *src, int32_t idx) {
+ return (int64_t)*((uint64_t *)src + idx);
}
_arithmetic_getVectorBigintValue_fn_t getVectorBigintValueFn(int32_t srcType) {
diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c
index 55afd4c62096974e379e78d448086f10e9860764..4786700f97ab488e33df81810f3f061b5bb67111 100644
--- a/src/common/src/tdataformat.c
+++ b/src/common/src/tdataformat.c
@@ -304,14 +304,14 @@ bool isNEleNull(SDataCol *pCol, int nEle) {
return true;
}
-static FORCE_INLINE void dataColSetNullAt(SDataCol *pCol, int index) {
+static FORCE_INLINE void dataColSetNullAt(SDataCol *pCol, int idx) {
if (IS_VAR_DATA_TYPE(pCol->type)) {
- pCol->dataOff[index] = pCol->len;
+ pCol->dataOff[idx] = pCol->len;
char *ptr = POINTER_SHIFT(pCol->pData, pCol->len);
setVardataNull(ptr, pCol->type);
pCol->len += varDataTLen(ptr);
} else {
- setNull(POINTER_SHIFT(pCol->pData, TYPE_BYTES[pCol->type] * index), pCol->type, pCol->bytes);
+ setNull(POINTER_SHIFT(pCol->pData, TYPE_BYTES[pCol->type] * idx), pCol->type, pCol->bytes);
pCol->len += TYPE_BYTES[pCol->type];
}
}
diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c
index 2b8c2bbb660bdcf36e2cea1390db89235733ddcc..44a53efd8c01ae47d4c6d069ca6d6d5c53448b10 100644
--- a/src/common/src/tglobal.c
+++ b/src/common/src/tglobal.c
@@ -397,10 +397,10 @@ bool taosCfgDynamicOptions(char *msg) {
return false;
}
-void taosAddDataDir(int index, char *v1, int level, int primary) {
- tstrncpy(tsDiskCfg[index].dir, v1, TSDB_FILENAME_LEN);
- tsDiskCfg[index].level = level;
- tsDiskCfg[index].primary = primary;
+void taosAddDataDir(int idx, char *v1, int level, int primary) {
+ tstrncpy(tsDiskCfg[idx].dir, v1, TSDB_FILENAME_LEN);
+ tsDiskCfg[idx].level = level;
+ tsDiskCfg[idx].primary = primary;
uTrace("dataDir:%s, level:%d primary:%d is configured", v1, level, primary);
}
diff --git a/src/common/src/tname.c b/src/common/src/tname.c
index 521dcffa6c018a00e7485d15ee9e5799054bdba5..7653df3879db6596accad6fd46f8734110960c2f 100644
--- a/src/common/src/tname.c
+++ b/src/common/src/tname.c
@@ -442,29 +442,29 @@ void tNameAssign(SName* dst, const SName* src) {
memcpy(dst, src, sizeof(SName));
}
-int32_t tNameSetDbName(SName* dst, const char* acct, SStrToken* dbToken) {
- assert(dst != NULL && dbToken != NULL && acct != NULL);
+int32_t tNameSetDbName(SName* dst, const char* accnt, SStrToken* dbToken) {
+ assert(dst != NULL && dbToken != NULL && accnt != NULL);
// too long account id or too long db name
- if (strlen(acct) >= tListLen(dst->acctId) || dbToken->n >= tListLen(dst->dbname)) {
+ if (strlen(accnt) >= tListLen(dst->acctId) || dbToken->n >= tListLen(dst->dbname)) {
return -1;
}
dst->type = TSDB_DB_NAME_T;
- tstrncpy(dst->acctId, acct, tListLen(dst->acctId));
+ tstrncpy(dst->acctId, accnt, tListLen(dst->acctId));
tstrncpy(dst->dbname, dbToken->z, dbToken->n + 1);
return 0;
}
-int32_t tNameSetAcctId(SName* dst, const char* acct) {
- assert(dst != NULL && acct != NULL);
+int32_t tNameSetAcctId(SName* dst, const char* accnt) {
+ assert(dst != NULL && accnt != NULL);
// too long account id or too long db name
- if (strlen(acct) >= tListLen(dst->acctId)) {
+ if (strlen(accnt) >= tListLen(dst->acctId)) {
return -1;
}
- tstrncpy(dst->acctId, acct, tListLen(dst->acctId));
+ tstrncpy(dst->acctId, accnt, tListLen(dst->acctId));
assert(strlen(dst->acctId) > 0);
diff --git a/src/common/src/ttypes.c b/src/common/src/ttypes.c
index 81bc9c7275b07cf41dc1305e4db807e1b2b839a0..fa4126350c2e809459c830c7f5cd08e2bd851ac7 100644
--- a/src/common/src/ttypes.c
+++ b/src/common/src/ttypes.c
@@ -259,8 +259,8 @@ static void getStatics_u64(const void *pData, int32_t numOfRow, int64_t *min, in
static void getStatics_f(const void *pData, int32_t numOfRow, int64_t *min, int64_t *max,
int64_t *sum, int16_t *minIndex, int16_t *maxIndex, int16_t *numOfNull) {
float *data = (float *)pData;
- float fmin = FLT_MAX;
- float fmax = -FLT_MAX;
+ float flmin = FLT_MAX;
+ float flmax = -FLT_MAX;
double dsum = 0;
*minIndex = 0;
*maxIndex = 0;
@@ -276,20 +276,20 @@ static void getStatics_f(const void *pData, int32_t numOfRow, int64_t *min, int6
float fv = GET_FLOAT_VAL((const char*)&(data[i]));
dsum += fv;
- if (fmin > fv) {
- fmin = fv;
+ if (flmin > fv) {
+ flmin = fv;
*minIndex = i;
}
- if (fmax < fv) {
- fmax = fv;
+ if (flmax < fv) {
+ flmax = fv;
*maxIndex = i;
}
}
SET_DOUBLE_VAL(sum, dsum);
- SET_DOUBLE_VAL(max, fmax);
- SET_DOUBLE_VAL(min, fmin);
+ SET_DOUBLE_VAL(max, flmax);
+ SET_DOUBLE_VAL(min, flmin);
}
static void getStatics_d(const void *pData, int32_t numOfRow, int64_t *min, int64_t *max,
diff --git a/src/dnode/src/dnodeCheck.c b/src/dnode/src/dnodeCheck.c
index 87baff30673afc68eb23a00bef279433a422ba67..f0218fdba9b531800ab5a6791ee700e0a36e5c9b 100644
--- a/src/dnode/src/dnodeCheck.c
+++ b/src/dnode/src/dnodeCheck.c
@@ -229,12 +229,12 @@ static void dnodeAllocCheckItem() {
}
void dnodeCleanupCheck() {
- for (ECheckItemType index = 0; index < TSDB_CHECK_ITEM_MAX; ++index) {
- if (tsCheckItem[index].enable && tsCheckItem[index].stopFp) {
- (*tsCheckItem[index].stopFp)();
+ for (ECheckItemType idx = 0; idx < TSDB_CHECK_ITEM_MAX; ++idx) {
+ if (tsCheckItem[idx].enable && tsCheckItem[idx].stopFp) {
+ (*tsCheckItem[idx].stopFp)();
}
- if (tsCheckItem[index].cleanUpFp) {
- (*tsCheckItem[index].cleanUpFp)();
+ if (tsCheckItem[idx].cleanUpFp) {
+ (*tsCheckItem[idx].cleanUpFp)();
}
}
}
@@ -242,19 +242,19 @@ void dnodeCleanupCheck() {
int32_t dnodeInitCheck() {
dnodeAllocCheckItem();
- for (ECheckItemType index = 0; index < TSDB_CHECK_ITEM_MAX; ++index) {
- if (tsCheckItem[index].initFp) {
- if ((*tsCheckItem[index].initFp)() != 0) {
- dError("failed to init check item:%s", tsCheckItem[index].name);
+ for (ECheckItemType idx = 0; idx < TSDB_CHECK_ITEM_MAX; ++idx) {
+ if (tsCheckItem[idx].initFp) {
+ if ((*tsCheckItem[idx].initFp)() != 0) {
+ dError("failed to init check item:%s", tsCheckItem[idx].name);
return -1;
}
}
}
- for (ECheckItemType index = 0; index < TSDB_CHECK_ITEM_MAX; ++index) {
- if (tsCheckItem[index].enable && tsCheckItem[index].startFp) {
- if ((*tsCheckItem[index].startFp)() != 0) {
- dError("failed to check item:%s", tsCheckItem[index].name);
+ for (ECheckItemType idx = 0; idx < TSDB_CHECK_ITEM_MAX; ++idx) {
+ if (tsCheckItem[idx].enable && tsCheckItem[idx].startFp) {
+ if ((*tsCheckItem[idx].startFp)() != 0) {
+ dError("failed to check item:%s", tsCheckItem[idx].name);
exit(-1);
}
}
diff --git a/src/dnode/src/dnodePeer.c b/src/dnode/src/dnodePeer.c
index 08269c0bf6141974366936660bee326682cd90f5..cc1b1c98aa6c97032b4ce6aa198088353c48374f 100644
--- a/src/dnode/src/dnodePeer.c
+++ b/src/dnode/src/dnodePeer.c
@@ -56,17 +56,17 @@ int32_t dnodeInitServer() {
dnodeProcessReqMsgFp[TSDB_MSG_TYPE_DM_GRANT] = dnodeDispatchToMPeerQueue;
dnodeProcessReqMsgFp[TSDB_MSG_TYPE_DM_STATUS] = dnodeDispatchToMPeerQueue;
- SRpcInit rpcInit;
- memset(&rpcInit, 0, sizeof(rpcInit));
- rpcInit.localPort = tsDnodeDnodePort;
- rpcInit.label = "DND-S";
- rpcInit.numOfThreads = 1;
- rpcInit.cfp = dnodeProcessReqMsgFromDnode;
- rpcInit.sessions = TSDB_MAX_VNODES << 4;
- rpcInit.connType = TAOS_CONN_SERVER;
- rpcInit.idleTime = tsShellActivityTimer * 1000;
-
- tsServerRpc = rpcOpen(&rpcInit);
+ SRpcInit rpcInitial;
+ memset(&rpcInitial, 0, sizeof(rpcInitial));
+ rpcInitial.localPort = tsDnodeDnodePort;
+ rpcInitial.label = "DND-S";
+ rpcInitial.numOfThreads = 1;
+ rpcInitial.cfp = dnodeProcessReqMsgFromDnode;
+ rpcInitial.sessions = TSDB_MAX_VNODES << 4;
+ rpcInitial.connType = TAOS_CONN_SERVER;
+ rpcInitial.idleTime = tsShellActivityTimer * 1000;
+
+ tsServerRpc = rpcOpen(&rpcInitial);
if (tsServerRpc == NULL) {
dError("failed to init inter-dnodes RPC server");
return -1;
@@ -123,19 +123,19 @@ static void dnodeProcessReqMsgFromDnode(SRpcMsg *pMsg, SRpcEpSet *pEpSet) {
int32_t dnodeInitClient() {
char secret[TSDB_KEY_LEN] = "secret";
- SRpcInit rpcInit;
- memset(&rpcInit, 0, sizeof(rpcInit));
- rpcInit.label = "DND-C";
- rpcInit.numOfThreads = 1;
- rpcInit.cfp = dnodeProcessRspFromDnode;
- rpcInit.sessions = TSDB_MAX_VNODES << 4;
- rpcInit.connType = TAOS_CONN_CLIENT;
- rpcInit.idleTime = tsShellActivityTimer * 1000;
- rpcInit.user = "t";
- rpcInit.ckey = "key";
- rpcInit.secret = secret;
-
- tsClientRpc = rpcOpen(&rpcInit);
+ SRpcInit rpcInitial;
+ memset(&rpcInitial, 0, sizeof(rpcInitial));
+ rpcInitial.label = "DND-C";
+ rpcInitial.numOfThreads = 1;
+ rpcInitial.cfp = dnodeProcessRspFromDnode;
+ rpcInitial.sessions = TSDB_MAX_VNODES << 4;
+ rpcInitial.connType = TAOS_CONN_CLIENT;
+ rpcInitial.idleTime = tsShellActivityTimer * 1000;
+ rpcInitial.user = "t";
+ rpcInitial.ckey = "key";
+ rpcInitial.secret = secret;
+
+ tsClientRpc = rpcOpen(&rpcInitial);
if (tsClientRpc == NULL) {
dError("failed to init mnode rpc client");
return -1;
diff --git a/src/dnode/src/dnodeShell.c b/src/dnode/src/dnodeShell.c
index 0f536a84ccdb1519231abccc1a0f4ed30ac86d6c..2bf67f9a5c214036b21d7742af13925f3bbd3b1a 100644
--- a/src/dnode/src/dnodeShell.c
+++ b/src/dnode/src/dnodeShell.c
@@ -85,18 +85,18 @@ int32_t dnodeInitShell() {
numOfThreads = 1;
}
- SRpcInit rpcInit;
- memset(&rpcInit, 0, sizeof(rpcInit));
- rpcInit.localPort = tsDnodeShellPort;
- rpcInit.label = "SHELL";
- rpcInit.numOfThreads = numOfThreads;
- rpcInit.cfp = dnodeProcessMsgFromShell;
- rpcInit.sessions = tsMaxShellConns;
- rpcInit.connType = TAOS_CONN_SERVER;
- rpcInit.idleTime = tsShellActivityTimer * 1000;
- rpcInit.afp = dnodeRetrieveUserAuthInfo;
-
- tsShellRpc = rpcOpen(&rpcInit);
+ SRpcInit rpcInitial;
+ memset(&rpcInitial, 0, sizeof(rpcInitial));
+ rpcInitial.localPort = tsDnodeShellPort;
+ rpcInitial.label = "SHELL";
+ rpcInitial.numOfThreads = numOfThreads;
+ rpcInitial.cfp = dnodeProcessMsgFromShell;
+ rpcInitial.sessions = tsMaxShellConns;
+ rpcInitial.connType = TAOS_CONN_SERVER;
+ rpcInitial.idleTime = tsShellActivityTimer * 1000;
+ rpcInitial.afp = dnodeRetrieveUserAuthInfo;
+
+ tsShellRpc = rpcOpen(&rpcInitial);
if (tsShellRpc == NULL) {
dError("failed to init shell rpc server");
return -1;
@@ -260,10 +260,10 @@ SDnodeStatisInfo dnodeGetStatisInfo() {
return info;
}
-int32_t dnodeGetHttpStatusInfo(int32_t index) {
+int32_t dnodeGetHttpStatusInfo(int32_t idx) {
int32_t httpStatus = 0;
#ifdef HTTP_EMBEDDED
- httpStatus = httpGetStatusCodeCount(index);
+ httpStatus = httpGetStatusCodeCount(idx);
#endif
return httpStatus;
}
diff --git a/src/kit/shell/CMakeLists.txt b/src/kit/shell/CMakeLists.txt
index b311361c438d033ad3f7582d30df7d1c33357c1d..614fa328bae1f0ad32917da6b08dc372a8d696a2 100644
--- a/src/kit/shell/CMakeLists.txt
+++ b/src/kit/shell/CMakeLists.txt
@@ -46,6 +46,8 @@ ELSEIF (TD_DARWIN)
LIST(APPEND SRC ./src/shellCommand.c)
LIST(APPEND SRC ./src/shellImport.c)
LIST(APPEND SRC ./src/shellCheck.c)
+ LIST(APPEND SRC ./src/shellAuto.c)
+ LIST(APPEND SRC ./src/tire.c)
ADD_EXECUTABLE(shell ${SRC})
# linking with dylib
TARGET_LINK_LIBRARIES(shell taos cJson)
diff --git a/src/kit/shell/inc/shellAuto.h b/src/kit/shell/inc/shellAuto.h
new file mode 100644
index 0000000000000000000000000000000000000000..0bd6bdf4038c112b453feea02950cc3aa5577a50
--- /dev/null
+++ b/src/kit/shell/inc/shellAuto.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __SHELL_AUTO__
+#define __SHELL_AUTO__
+
+#define TAB_KEY 0x09
+
+// press tab key
+void pressTabKey(TAOS * con, Command * cmd);
+
+// press any other key
+void pressOtherKey(char c);
+
+// init shell auto function; called once at shell start
+bool shellAutoInit();
+
+// exit shell auto function; called once at shell exit
+void shellAutoExit();
+
+// callback autotab module
+void callbackAutoTab(char* sqlstr, TAOS* pSql, bool usedb);
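+
+// Hedged usage sketch (illustrative only; "con", "cmd" and "c" are assumed locals
+// of the shell main loop, not names defined in this header):
+//   shellAutoInit();                              // once, at shell startup
+//   ...
+//   if (c == TAB_KEY) pressTabKey(con, &cmd);     // inside the key-handling loop
+//   else              pressOtherKey(c);
+//   ...
+//   shellAutoExit();                              // once, at shell exit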
+
+
+#endif
diff --git a/src/kit/shell/inc/shellCommand.h b/src/kit/shell/inc/shellCommand.h
index 6e4d3e382e3d7e8c50405c07da8ed73725230434..47ef6b30a9b37ef0d790bcfc427abdfc14907874 100644
--- a/src/kit/shell/inc/shellCommand.h
+++ b/src/kit/shell/inc/shellCommand.h
@@ -41,6 +41,7 @@ extern void deleteChar(Command *cmd);
extern void moveCursorLeft(Command *cmd);
extern void moveCursorRight(Command *cmd);
extern void positionCursorHome(Command *cmd);
+extern void positionCursorMiddle(Command *cmd);
extern void positionCursorEnd(Command *cmd);
extern void showOnScreen(Command *cmd);
extern void updateBuffer(Command *cmd);
@@ -51,5 +52,6 @@ int countPrefixOnes(unsigned char c);
void clearScreen(int ecmd_pos, int cursor_pos);
void printChar(char c, int times);
void positionCursor(int step, int direction);
+void getPrevCharSize(const char *str, int pos, int *size, int *width);
#endif
diff --git a/src/kit/shell/inc/tire.h b/src/kit/shell/inc/tire.h
new file mode 100644
index 0000000000000000000000000000000000000000..88bae5480937cfdb1513415d13ba41d0a60e6b22
--- /dev/null
+++ b/src/kit/shell/inc/tire.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __TRIE__
+#define __TRIE__
+
+//
+// A prefix search tree (trie) for efficiently storing and searching words; it supports the printable ASCII range defined below.
+//
+#define FIRST_ASCII 40 // first supported char is '(' (ASCII 40)
+#define LAST_ASCII 122 // last supported char is 'z' (ASCII 122)
+
+// number of supported characters (LAST_ASCII - FIRST_ASCII + 1 = 83)
+#define CHAR_CNT (LAST_ASCII - FIRST_ASCII + 1)
+#define MAX_WORD_LEN 256 // max insert word length
+
+// define STire
+#define TIRE_TREE 0
+#define TIRE_LIST 1
+
+typedef struct STireNode {
+ struct STireNode** d;
+ bool end; // record end flag
+}STireNode;
+
+typedef struct StrName {
+ char * name;
+ struct StrName * next;
+}StrName;
+
+
+typedef struct STire {
+ char type; // see define TIRE_
+ STireNode root;
+
+ StrName * head;
+ StrName * tail;
+
+ int count; // all count
+ int ref;
+}STire;
+
+typedef struct SMatchNode {
+ char* word;
+ struct SMatchNode* next;
+}SMatchNode;
+
+
+typedef struct SMatch {
+ SMatchNode* head;
+ SMatchNode* tail; // append node to tail
+ int count;
+ char pre[MAX_WORD_LEN];
+}SMatch;
+
+
+// ----------- interface -------------
+
+// create prefix search tree; caller must release the returned value with freeTire
+STire* createTire(char type);
+
+// destroy prefix search tree
+void freeTire(STire* tire);
+
+// add a new word
+bool insertWord(STire* tire, char* word);
+
+// delete a word
+bool deleteWord(STire* tire, char* word);
+
+// match prefix words; if match is not NULL, append all items to match and return it
+SMatch* matchPrefix(STire* tire, char* prefix, SMatch* match);
+
+// get all items from the tire tree
+SMatch* enumAll(STire* tire);
+
+// free match result
+void freeMatch(SMatch* match);
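+
+// Hedged usage sketch (illustrative only; the literal words are made up):
+//   STire* t = createTire(TIRE_LIST);
+//   insertWord(t, "show databases;");
+//   insertWord(t, "show dnodes;");
+//   SMatch* m = matchPrefix(t, "show d", NULL);   // collect candidates for a prefix
+//   for (SMatchNode* n = (m ? m->head : NULL); n; n = n->next) {
+//     printf("%s\n", n->word);
+//   }
+//   freeMatch(m);
+//   freeTire(t);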
+
+#endif
diff --git a/src/kit/shell/src/shellAuto.c b/src/kit/shell/src/shellAuto.c
new file mode 100644
index 0000000000000000000000000000000000000000..8622b201a6fe6666476f0ac9916aebc169b78923
--- /dev/null
+++ b/src/kit/shell/src/shellAuto.c
@@ -0,0 +1,1761 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define __USE_XOPEN
+#include "os.h"
+#include "tglobal.h"
+#include "shell.h"
+#include "shellCommand.h"
+#include "tkey.h"
+#include "tulog.h"
+#include "shellAuto.h"
+#include "tire.h"
+#include "tthread.h"
+
+//
+// ------------- define area ---------------
+//
+#define UNION_ALL " union all "
+
+
+// extern function
+void insertChar(Command *cmd, char *c, int size);
+
+
+typedef struct SAutoPtr {
+ STire* p;
+ int ref;
+}SAutoPtr;
+
+typedef struct SWord{
+ int type ; // word type , see WT_ define
+ char * word;
+ int32_t len;
+ struct SWord * next;
+ bool free; // if true need free
+}SWord;
+
+typedef struct {
+ char * source;
+ int32_t source_len; // valid data length in source
+ int32_t count;
+ SWord* head;
+ // matched information
+ int32_t matchIndex; // matched word index in words
+ int32_t matchLen; // matched length at matched word
+}SWords;
+
+
+SWords shellCommands[] = {
+ {"alter database ", 0, 0, NULL},
+ {"alter dnode balance ", 0, 0, NULL},
+ {"alter dnode resetlog;", 0, 0, NULL},
+ {"alter dnode debugFlag 141;", 0, 0, NULL},
+ {"alter dnode monitor 1;", 0, 0, NULL},
+ {"alter table ", 0, 0, NULL},
+ {"alter table modify column", 0, 0, NULL},
+ {"alter local resetlog;", 0, 0, NULL},
+ {"alter local DebugFlag 143;", 0, 0, NULL},
+ {"alter local cDebugFlag 143;", 0, 0, NULL},
+ {"alter local uDebugFlag 143;", 0, 0, NULL},
+ {"alter local rpcDebugFlag 143;", 0, 0, NULL},
+ {"alter local tmrDebugFlag 143;", 0, 0, NULL},
+ {"alter topic", 0, 0, NULL},
+ {"alter user pass", 0, 0, NULL},
+ {"alter user privilege read", 0, 0, NULL},
+ {"alter user privilege write", 0, 0, NULL},
+ {"create table using tags(", 0, 0, NULL},
+ {"create database ", 0, 0, NULL},
+ {"create table as ", 0, 0, NULL},
+ {"create dnode ", 0, 0, NULL},
+ {"create topic", 0, 0, NULL},
+ {"create function ", 0, 0, NULL},
+ {"create user pass", 0, 0, NULL},
+ {"compact vnode in", 0, 0, NULL},
+ {"describe ", 0, 0, NULL},
+#ifdef TD_ENTERPRISE
+ {"delete from where", 0, 0, NULL},
+#endif
+ {"drop database ", 0, 0, NULL},
+ {"drop table ", 0, 0, NULL},
+ {"drop dnode ", 0, 0, NULL},
+ {"drop user ;", 0, 0, NULL},
+ {"drop function", 0, 0, NULL},
+ {"drop topic", 0, 0, NULL},
+ {"kill connection", 0, 0, NULL},
+ {"kill query", 0, 0, NULL},
+ {"kill stream", 0, 0, NULL},
+ {"select * from where ", 0, 0, NULL},
+ {"select _block_dist() from \\G;", 0, 0, NULL},
+ {"select client_version();", 0, 0, NULL},
+ {"select current_user();", 0, 0, NULL},
+ {"select database;", 0, 0, NULL},
+ {"select server_version();", 0, 0, NULL},
+ {"set max_binary_display_width ", 0, 0, NULL},
+ {"show create database \\G;", 0, 0, NULL},
+ {"show create stable \\G;", 0, 0, NULL},
+ {"show create table \\G;", 0, 0, NULL},
+ {"show connections;", 0, 0, NULL},
+ {"show databases;", 0, 0, NULL},
+ {"show dnodes;", 0, 0, NULL},
+ {"show functions;", 0, 0, NULL},
+ {"show modules;", 0, 0, NULL},
+ {"show mnodes;", 0, 0, NULL},
+ {"show queries;", 0, 0, NULL},
+ {"show stables;", 0, 0, NULL},
+ {"show stables like ", 0, 0, NULL},
+ {"show streams;", 0, 0, NULL},
+ {"show scores;", 0, 0, NULL},
+ {"show tables;", 0, 0, NULL},
+ {"show tables like", 0, 0, NULL},
+ {"show users;", 0, 0, NULL},
+ {"show variables;", 0, 0, NULL},
+ {"show vgroups;", 0, 0, NULL},
+ {"insert into values(", 0, 0, NULL},
+ {"insert into using tags(", 0, 0, NULL},
+ {"use ", 0, 0, NULL},
+ {"quit", 0, 0, NULL}
+};
+
+char * keywords[] = {
+ "and ",
+ "asc ",
+ "desc ",
+ "from ",
+ "fill(",
+ "limit ",
+ "where ",
+ "interval(",
+ "order by ",
+ "order by ",
+ "offset ",
+ "or ",
+ "group by ",
+ "now()",
+ "session(",
+ "sliding ",
+ "slimit ",
+ "soffset ",
+ "state_window(",
+ "today() ",
+ "union all select ",
+};
+
+char * functions[] = {
+ "count(",
+ "sum(",
+ "avg(",
+ "last(",
+ "last_row(",
+ "top(",
+ "interp(",
+ "max(",
+ "min(",
+ "now()",
+ "today()",
+ "percentile(",
+ "tail(",
+ "pow(",
+ "abs(",
+ "atan(",
+ "acos(",
+ "asin(",
+ "apercentile(",
+ "bottom(",
+ "cast(",
+ "ceil(",
+ "char_length(",
+ "cos(",
+ "concat(",
+ "concat_ws(",
+ "csum(",
+ "diff(",
+ "derivative(",
+ "elapsed(",
+ "first(",
+ "floor(",
+ "hyperloglog(",
+ "histogram(",
+ "irate(",
+ "leastsquares(",
+ "length(",
+ "log(",
+ "lower(",
+ "ltrim(",
+ "mavg(",
+ "mode(",
+ "tan(",
+ "round(",
+ "rtrim(",
+ "sample(",
+ "sin(",
+ "spread(",
+ "substr(",
+ "statecount(",
+ "stateduration(",
+ "stddev(",
+ "sqrt(",
+ "timediff(",
+ "timezone(",
+ "timetruncate(",
+ "twa(",
+ "to_unixtimestamp(",
+ "unique(",
+ "upper(",
+};
+
+char * tb_actions[] = {
+ "add column",
+ "modify column",
+ "drop column",
+ "change tag",
+};
+
+char * db_options[] = {
+ "blocks",
+ "cachelast",
+ "comp",
+ "keep",
+ "replica",
+ "quorum",
+};
+
+char * data_types[] = {
+ "timestamp",
+ "int",
+ "float",
+ "double",
+ "binary(16)",
+ "nchar(16)",
+ "bigint",
+ "smallint",
+ "tinyint",
+ "bool",
+ "json"
+};
+
+char * key_tags[] = {
+ "tags("
+};
+
+
+//
+// ------- global variable definitions ---------
+//
+int32_t firstMatchIndex = -1; // first match shellCommands index
+int32_t lastMatchIndex = -1; // last match shellCommands index
+int32_t curMatchIndex = -1; // current match shellCommands index
+int32_t lastWordBytes = -1; // printShow last word length
+bool waitAutoFill = false;
+
+
+//
+// ----------- global var array define -----------
+//
+#define WT_VAR_DBNAME 0
+#define WT_VAR_STABLE 1
+#define WT_VAR_TABLE 2
+#define WT_VAR_DNODEID 3
+#define WT_VAR_USERNAME 4
+#define WT_VAR_ALLTABLE 5
+#define WT_VAR_FUNC 6
+#define WT_VAR_KEYWORD 7
+#define WT_VAR_TBACTION 8
+#define WT_VAR_DBOPTION 9
+#define WT_VAR_DATATYPE 10
+#define WT_VAR_KEYTAGS 11
+#define WT_VAR_ANYWORD 12
+#define WT_VAR_CNT 13
+
+#define WT_FROM_DB_MAX 4 // largest type index whose names are fetched from the db
+#define WT_FROM_DB_CNT (WT_FROM_DB_MAX + 1)
+
+#define WT_TEXT 0xFF
+
+char dbName[256] = ""; // saves the database name set by "use"
+// tire array
+STire* tires[WT_VAR_CNT];
+pthread_mutex_t tiresMutex;
+// thread handles for obtaining var names from the db server
+pthread_t* threads[WT_FROM_DB_CNT];
+// obtain var name with sql from server
+char varTypes[WT_VAR_CNT][64] = {
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ ""
+};
+
+char varSqls[WT_FROM_DB_CNT][64] = {
+ "show databases;",
+ "show stables;",
+ "show tables;",
+ "show dnodes;",
+ "show users;"
+};
+
+
+// current cursor within the var word list; pressing any key except tab resets cursorVar to -1
+int cursorVar = -1;
+bool varMode = false; // enter var names list mode
+
+
+TAOS* varCon = NULL;
+Command* varCmd = NULL;
+SMatch* lastMatch = NULL; // save last match result
+int cntDel = 0; // delete byte count after next press tab
+
+
+// show auto tab introduction
+void printfIntroduction() {
+ printf(" ********************* How to Use TAB in TAOS Shell ******************************\n");
+ printf(" * Taos shell supports pressing TAB key to complete word. You can try it. *\n");
+ printf(" * Press TAB key anywhere, You'll get surprise. *\n");
+ printf(" * KEYBOARD SHORTCUT: *\n");
+ printf(" * [ TAB ] ...... Complete the word or show help if no input *\n");
+ printf(" * [ Ctrl + A ] ...... move cursor to [A]head of line *\n");
+ printf(" * [ Ctrl + E ] ...... move cursor to [E]nd of line *\n");
+ printf(" * [ Ctrl + W ] ...... move cursor to line of middle *\n");
+ printf(" * [ Ctrl + L ] ...... clean screen *\n");
+ printf(" * [ Ctrl + K ] ...... clean after cursor *\n");
+ printf(" * [ Ctrl + U ] ...... clean before cursor *\n");
+ printf(" * *\n");
+ printf(" **********************************************************************************\n\n");
+}
+
+void showHelp() {
+ printf("\nThe following are supported commands for Taos shell:");
+ printf("\n\
+ ----- A ----- \n\
+ alter database \n\
+ alter dnode balance \n\
+ alter dnode resetlog;\n\
+ alter dnode DebugFlag 143;\n\
+ alter dnode monitor 1;\n\
+ alter table ADD COLUMN ; \n\
+ alter table DROP COLUMN ; \n\
+ alter table MODIFY COLUMN ;\n\
+ alter local resetlog; \n\
+ alter local DebugFlag 143; \n\
+ alter topic \n\
+ alter user pass\n\
+ alter user privilege read ;\n\
+ alter user privilege write ;\n\
+ ----- C ----- \n\
+ create table using tags ...\n\
+ create database ;\n\
+ create table as ...\n\
+ create dnode \n\
+ create topic \n\
+ create function \n\
+ create user pass ;\n\
+ compact vnode in (vgid,vgid,vgid);\n\
+ ----- D ----- \n\
+ describe ;\n\
+ delete from where ... \n\
+ drop database ;\n\
+ drop table ;\n\
+ drop dnode ;\n\
+ drop function ;\n\
+ drop topic ;\n\
+ drop user ;\n\
+ ----- K ----- \n\
+ kill connection ; \n\
+ kill query ; \n\
+ kill stream ; \n\
+ ----- S ----- \n\
+ select * from where ... \n\
+ select _block_dist() from ;\n\
+ select client_version();\n\
+ select current_user();\n\
+ select database;\n\
+ select server_version();\n\
+ set max_binary_display_width ; \n\
+ show create database ;\n\
+ show create stable ;\n\
+ show create table ;\n\
+ show connections;\n\
+ show databases;\n\
+ show dnodes;\n\
+ show functions;\n\
+ show modules;\n\
+ show mnodes;\n\
+ show queries;\n\
+ show stables;\n\
+ show stables like ''; note: regular expression only supports '_' and '%%' matching.\n\
+ show streams;\n\
+ show scores;\n\
+ show tables;\n\
+ show tables like ''; \n\
+ show users;\n\
+ show variables;\n\
+ show vgroups;\n\
+ ----- I ----- \n\
+ insert into values(...) ;\n\
+ ----- U ----- \n\
+ use ;");
+
+ printf("\n\n");
+
+ // defined in the getDuration() function
+ printf("\
+ Timestamp expression Format:\n\
+ b - nanosecond \n\
+ u - microsecond \n\
+ a - millisecond \n\
+ s - second \n\
+ m - minute \n\
+ h - hour \n\
+ d - day \n\
+ w - week \n\
+ now - current time \n\
+ Example : \n\
+ select * from t1 where ts > now - 2w + 3d and ts <= now - 1w -2h ;\n");
+ printf("\n");
+}
+
+//
+// ------------------- parse words --------------------------
+//
+
+#define SHELL_COMMAND_COUNT() (sizeof(shellCommands) / sizeof(SWords))
+
+// get the word at the given index
+SWord * atWord(SWords * command, int32_t index) {
+ SWord * word = command->head;
+ for (int32_t i = 0; i < index; i++) {
+ if (word == NULL)
+ return NULL;
+ word = word->next;
+ }
+
+ return word;
+}
+
+#define MATCH_WORD(x) atWord(x, x->matchIndex)
+
+int wordType(const char* p, int32_t len) {
+ for (int i = 0; i < WT_VAR_CNT; i++) {
+ if (strncmp(p, varTypes[i], len) == 0)
+ return i;
+ }
+ return WT_TEXT;
+}
+
+// add word
+SWord * addWord(const char* p, int32_t len, bool pattern) {
+ SWord* word = (SWord *) malloc(sizeof(SWord));
+ memset(word, 0, sizeof(SWord));
+ word->word = (char* )p;
+ word->len = len;
+
+ // check format
+ if (pattern) {
+ word->type = wordType(p, len);
+ } else {
+ word->type = WT_TEXT;
+ }
+
+ return word;
+}
+
+// parse one command
+void parseCommand(SWords * command, bool pattern) {
+ char * p = command->source;
+ int32_t start = 0;
+ int32_t size = command->source_len > 0 ? command->source_len : strlen(p);
+
+ bool lastBlank = false;
+ for (int i = 0; i <= size; i++) {
+ if (p[i] == ' ' || i == size) {
+ // skip consecutive blanks like ' '
+ if (p[i] == ' ') {
+ if (lastBlank) {
+ start ++;
+ continue;
+ }
+ if (i == 0) { // first blank
+ lastBlank = true;
+ start ++;
+ continue;
+ }
+ lastBlank = true;
+ }
+
+ // found a separator or the string end, append the word
+ if (command->head == NULL) {
+ command->head = addWord(p + start, i - start, pattern);
+ command->count = 1;
+ } else {
+ SWord * word = command->head;
+ while (word->next) {
+ word = word->next;
+ }
+ word->next = addWord(p + start, i - start, pattern);
+ command->count ++;
+ }
+ start = i + 1;
+ } else {
+ lastBlank = false;
+ }
+ }
+}
+
+// free Command
+void freeCommand(SWords * command) {
+ SWord * word = command->head;
+ if (word == NULL) {
+ return ;
+ }
+
+ // loop
+ while (word->next) {
+ SWord * tmp = word;
+ word = word->next;
+ // free the word buffer if it was malloc'ed
+ if(tmp->free && tmp->word)
+ free(tmp->word);
+ free(tmp);
+ }
+
+ // free the word buffer if it was malloc'ed
+ if(word->free && word->word)
+ free(word->word);
+ free(word);
+}
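+
+// Hedged sketch of how a template line is tokenized (illustrative; the literal
+// template and the local name "tmpl" are assumptions):
+//   SWords tmpl = {"show tables like", 0, 0, NULL};
+//   parseCommand(&tmpl, true);
+//   // tmpl.head now points to "show" -> "tables" -> "like", each typed by wordType()
+//   freeCommand(&tmpl);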
+
+void GenerateVarType(int type, char** p, int count) {
+ STire* tire = createTire(TIRE_LIST);
+ for (int i = 0; i < count; i++) {
+ insertWord(tire, p[i]);
+ }
+
+ pthread_mutex_lock(&tiresMutex);
+ tires[type] = tire;
+ pthread_mutex_unlock(&tiresMutex);
+}
+
+//
+// -------------------- shell auto ----------------
+//
+
+
+// init shell auto function; called once at shell start
+bool shellAutoInit() {
+ // command
+ int32_t count = SHELL_COMMAND_COUNT();
+ for (int32_t i = 0; i < count; i ++) {
+ parseCommand(shellCommands + i, true);
+ }
+
+ // tires
+ memset(tires, 0, sizeof(STire*) * WT_VAR_CNT);
+ pthread_mutex_init(&tiresMutex, NULL);
+
+ // threads
+ memset(threads, 0, sizeof(pthread_t*) * WT_FROM_DB_CNT);
+
+ // generate varType
+ GenerateVarType(WT_VAR_FUNC, functions, sizeof(functions) /sizeof(char *));
+ GenerateVarType(WT_VAR_KEYWORD, keywords, sizeof(keywords) /sizeof(char *));
+ GenerateVarType(WT_VAR_DBOPTION, db_options, sizeof(db_options) /sizeof(char *));
+ GenerateVarType(WT_VAR_TBACTION, tb_actions, sizeof(tb_actions) /sizeof(char *));
+ GenerateVarType(WT_VAR_DATATYPE, data_types, sizeof(data_types) /sizeof(char *));
+ GenerateVarType(WT_VAR_KEYTAGS, key_tags, sizeof(key_tags) /sizeof(char *));
+
+ printfIntroduction();
+
+ return true;
+}
+
+// exit shell auto function; called once at shell exit
+void shellAutoExit() {
+ // free command
+ int32_t count = SHELL_COMMAND_COUNT();
+ for (int32_t i = 0; i < count; i ++) {
+ freeCommand(shellCommands + i);
+ }
+
+ // free tires
+ pthread_mutex_lock(&tiresMutex);
+ for (int32_t i = 0; i < WT_VAR_CNT; i++) {
+ if (tires[i]) {
+ freeTire(tires[i]);
+ tires[i] = NULL;
+ }
+ }
+ pthread_mutex_unlock(&tiresMutex);
+ // destroy the mutex
+ pthread_mutex_destroy(&tiresMutex);
+
+ // free threads
+ for (int32_t i = 0; i < WT_VAR_CNT; i++) {
+ if (threads[i]) {
+ taosDestroyThread(threads[i]);
+ threads[i] = NULL;
+ }
+ }
+
+ // free lastMatch
+ if (lastMatch) {
+ freeMatch(lastMatch);
+ lastMatch = NULL;
+ }
+}
+
+//
+// ------------------- auto ptr for tires --------------------------
+//
+bool setNewAuotPtr(int type, STire* pNew) {
+ if (pNew == NULL)
+ return false;
+
+ pthread_mutex_lock(&tiresMutex);
+ STire* pOld = tires[type];
+ if (pOld != NULL) {
+    // a previous tire exists, release its reference
+ if (--pOld->ref == 0) {
+ freeTire(pOld);
+ }
+ }
+
+ // set new
+ tires[type] = pNew;
+ tires[type]->ref = 1;
+ pthread_mutex_unlock(&tiresMutex);
+
+ return true;
+}
+
+// get ptr
+STire* getAutoPtr(int type) {
+ if (tires[type] == NULL) {
+ return NULL;
+ }
+
+ pthread_mutex_lock(&tiresMutex);
+ tires[type]->ref++;
+ pthread_mutex_unlock(&tiresMutex);
+
+ return tires[type];
+}
+
+// put a tire back; if it no longer equals tires[type] it has been replaced and must be freed
+void putBackAutoPtr(int type, STire* tire) {
+ if (tire == NULL) {
+ return ;
+ }
+
+ pthread_mutex_lock(&tiresMutex);
+ if (tires[type] != tire) {
+     // replaced from outside, can't put back, so release it
+     if (--tire->ref == 1) {
+       // supports concurrent getAutoPtr callers
+ freeTire(tire);
+ }
+
+ } else {
+ tires[type]->ref--;
+ assert(tires[type]->ref > 0);
+ }
+ pthread_mutex_unlock(&tiresMutex);
+
+ return ;
+}
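+
+// Typical usage of the reference-counted pointers above (illustrative sketch;
+// assumes the tire for the given type has already been filled):
+//
+//   STire* tire = getAutoPtr(WT_VAR_KEYWORD);     // takes a reference
+//   if (tire) {
+//     char* next = matchNextPrefix(tire, "sel");  // may be NULL; caller frees it
+//     if (next) free(next);
+//     putBackAutoPtr(WT_VAR_KEYWORD, tire);       // releases the reference
+//   }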
+
+
+
+//
+// ------------------- var Word --------------------------
+//
+
+#define MAX_CACHED_CNT 100000 // max cached rows (100,000)
+// write sql result rows into the tire for this var type, return the number of rows written
+int writeVarNames(int type, TAOS_RES* tres) {
+ // fetch row
+ TAOS_ROW row = taos_fetch_row(tres);
+ if (row == NULL) {
+ return 0;
+ }
+
+ TAOS_FIELD *fields = taos_fetch_fields(tres);
+ // create new tires
+ char tireType = type == WT_VAR_TABLE ? TIRE_TREE : TIRE_LIST;
+ STire* tire = createTire(tireType);
+
+ // enum rows
+ char name[1024];
+ int numOfRows = 0;
+ do {
+ int32_t* lengths = taos_fetch_lengths(tres);
+ int32_t bytes = lengths[0];
+ if(fields[0].type == TSDB_DATA_TYPE_SMALLINT) {
+ sprintf(name,"%d", *(int16_t*)row[0]);
+ } else {
+ memcpy(name, row[0], bytes);
+ }
+
+ name[bytes] = 0; //set string end
+ // insert to tire
+ insertWord(tire, name);
+
+ if (++numOfRows > MAX_CACHED_CNT ) {
+ break;
+ }
+
+ row = taos_fetch_row(tres);
+ } while (row != NULL);
+
+ // replace old tire
+ setNewAuotPtr(type, tire);
+
+ return numOfRows;
+}
+
+bool firstMatchCommand(TAOS * con, Command * cmd);
+//
+// worker thread: obtain variable names from the db server
+//
+void* varObtainThread(void* param) {
+ int type = *(int* )param;
+ free(param);
+
+ if (varCon == NULL || type > WT_FROM_DB_MAX) {
+ return NULL;
+ }
+
+ TAOS_RES* pSql = taos_query_h(varCon, varSqls[type], NULL);
+ if (taos_errno(pSql)) {
+ taos_free_result(pSql);
+ return NULL;
+ }
+
+ // write var names from pSql
+ int cnt = writeVarNames(type, pSql);
+
+ // free sql
+ taos_free_result(pSql);
+
+ // check need call auto tab
+ if (cnt > 0 && waitAutoFill) {
+ // press tab key by program
+ firstMatchCommand(varCon, varCmd);
+ }
+
+ return NULL;
+}
+
+// return the next matching word among all matched words; the returned value must be freed by the caller
+char* matchNextPrefix(STire* tire, char* pre) {
+ SMatch* match = NULL;
+
+ // re-use last result
+ if (lastMatch) {
+ if (strcmp(pre, lastMatch->pre) == 0) {
+ // same pre
+ match = lastMatch;
+ }
+ }
+
+ if (match == NULL) {
+ // not same with last result
+ if (pre[0] == 0) {
+ // EMPTY PRE
+ match = enumAll(tire);
+ } else {
+ // NOT EMPTY
+ match = matchPrefix(tire, pre, NULL);
+ }
+
+ // save to lastMatch
+ if (match) {
+ if (lastMatch)
+ freeMatch(lastMatch);
+ lastMatch = match;
+ }
+ }
+
+ // check valid
+ if (match == NULL || match->head == NULL) {
+ // no one matched
+    return NULL;
+ }
+
+ if (cursorVar == -1) {
+ // first
+ cursorVar = 0;
+ return strdup(match->head->word);
+ }
+
+ // according to cursorVar , calculate next one
+ int i = 0;
+ SMatchNode* item = match->head;
+ while (item) {
+ if (i == cursorVar + 1) {
+ // found next position ok
+ if (item->next == NULL) {
+ // match last item, reset cursorVar to head
+ cursorVar = -1;
+ } else {
+ cursorVar = i;
+ }
+
+ return strdup(item->word);
+ }
+
+ // check end item
+ if (item->next == NULL) {
+ // if cursorVar > var list count, return last and reset cursorVar
+ cursorVar = -1;
+
+ return strdup(item->word);
+ }
+
+ // move next
+ item = item->next;
+ i++;
+ }
+
+ return NULL;
+}
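+
+// Cycling behaviour of cursorVar above (hypothetical match list): if prefix "s"
+// matches "show", "select" and "stable", repeated calls return them in that
+// order; after the last item cursorVar is reset to -1, so the next call starts
+// from the head of the list again.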
+
+// search the prefix in the tire of the given type; the returned value must be freed by the caller
+char* tireSearchWord(int type, char* pre) {
+ if (type == WT_TEXT) {
+ return NULL;
+ }
+
+ if(type > WT_FROM_DB_MAX) {
+    // NOT FROM DB, tires[type] is always non-null
+ STire* tire = tires[type];
+ if (tire == NULL)
+ return NULL;
+ return matchNextPrefix(tire, pre);
+ }
+
+ // TYPE CONTEXT GET FROM DB
+ pthread_mutex_lock(&tiresMutex);
+
+ // check need obtain from server
+ if (tires[type] == NULL) {
+ waitAutoFill = true;
+    // need to obtain var names from the db server asynchronously
+ if (threads[type] != NULL) {
+ if (taosThreadRunning(threads[type])) {
+        // thread is still running, no need to obtain again
+ pthread_mutex_unlock(&tiresMutex);
+ return NULL;
+ }
+      // destroy the previous thread handle before creating a new one
+ taosDestroyThread(threads[type]);
+ threads[type] = NULL;
+ }
+
+ // create new
+ void * param = malloc(sizeof(int));
+ *((int* )param) = type;
+ threads[type] = taosCreateThread(varObtainThread, param);
+ pthread_mutex_unlock(&tiresMutex);
+ return NULL;
+ }
+ pthread_mutex_unlock(&tiresMutex);
+
+ // can obtain var names from local
+ STire* tire = getAutoPtr(type);
+ if (tire == NULL) {
+ return NULL;
+ }
+
+ char* str = matchNextPrefix(tire, pre);
+  // done with it, put the pointer back into the autoptr array
+ putBackAutoPtr(type, tire);
+
+ return str;
+}
+
+// match a var word: word1 is the pattern, word2 is the shell input
+bool matchVarWord(SWord* word1, SWord* word2) {
+ // search input word from tire tree
+ char pre[512];
+ memcpy(pre, word2->word, word2->len);
+ pre[word2->len] = 0;
+
+ char* str = NULL;
+ if (word1->type == WT_VAR_ALLTABLE) {
+ // ALL_TABLE
+ str = tireSearchWord(WT_VAR_STABLE, pre);
+ if (str == NULL) {
+ str = tireSearchWord(WT_VAR_TABLE, pre);
+ if(str == NULL)
+ return false;
+ }
+ } else {
+ // OTHER
+ str = tireSearchWord(word1->type, pre);
+ if (str == NULL) {
+      // not found, or the variable list for word1->type has not been obtained from the server yet
+ return false;
+ }
+ }
+
+ // free previous malloc
+ if(word1->free && word1->word) {
+ free(word1->word);
+ }
+
+ // save
+ word1->word = str;
+ word1->len = strlen(str);
+ word1->free = true; // need free
+
+ return true;
+}
+
+//
+// ------------------- match words --------------------------
+//
+
+
+// compare commands: cmd1 comes from shellCommands, cmd2 comes from user input
+int32_t compareCommand(SWords * cmd1, SWords * cmd2) {
+ SWord * word1 = cmd1->head;
+ SWord * word2 = cmd2->head;
+
+ if (word1 == NULL || word2 == NULL) {
+ return -1;
+ }
+
+ for (int32_t i = 0; i < cmd1->count; i++) {
+ if (word1->type == WT_TEXT) {
+ // WT_TEXT match
+ if (word1->len == word2->len) {
+ if (strncasecmp(word1->word, word2->word, word1->len) != 0)
+ return -1;
+ } else if (word1->len < word2->len) {
+ return -1;
+ } else {
+ // word1->len > word2->len
+ if (strncasecmp(word1->word, word2->word, word2->len) == 0) {
+ cmd1->matchIndex = i;
+ cmd1->matchLen = word2->len;
+ return i;
+ } else {
+ return -1;
+ }
+ }
+ } else {
+ // WT_VAR auto match any one word
+      if (word2->next == NULL) { // last word of the input
+ if (matchVarWord(word1, word2)) {
+ cmd1->matchIndex = i;
+ cmd1->matchLen = word2->len;
+ varMode = true;
+ return i;
+ }
+ return -1;
+ }
+ }
+
+ // move next
+ word1 = word1->next;
+ word2 = word2->next;
+ if (word1 == NULL || word2 == NULL) {
+ return -1;
+ }
+ }
+
+ return -1;
+}
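+
+// Return value of compareCommand(): -1 means no match; a value >= 0 is the
+// index of the word in cmd1 where the user's input ends as a partial match,
+// and cmd1->matchIndex / cmd1->matchLen are set so that printScreen() can
+// append the remaining characters of that word.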
+
+// match command
+SWords * matchCommand(SWords * input, bool continueSearch) {
+ int32_t count = SHELL_COMMAND_COUNT();
+ for (int32_t i = 0; i < count; i ++) {
+ SWords * shellCommand = shellCommands + i;
+ if (continueSearch && lastMatchIndex != -1 && i <= lastMatchIndex) {
+      // a new match must be greater than lastMatchIndex
+ if (varMode && i == lastMatchIndex) {
+ // do nothing, var match on lastMatchIndex
+ } else {
+ continue;
+ }
+ }
+
+    // input has more words than this command, skip it
+ if (input->count > shellCommand->count ) {
+ continue;
+ }
+
+ // compare
+ int32_t index = compareCommand(shellCommand, input);
+ if (index != -1) {
+ if (firstMatchIndex == -1)
+ firstMatchIndex = i;
+ curMatchIndex = i;
+ return &shellCommands[i];
+ }
+ }
+
+ // not match
+ return NULL;
+}
+
+//
+// ------------------- print screen --------------------------
+//
+
+// delete count characters before the cursor
+void deleteCount(Command * cmd, int count) {
+ int size = 0;
+ int width = 0;
+ clearScreen(cmd->endOffset + prompt_size, cmd->screenOffset + prompt_size);
+
+ // loop delete
+ while (--count >= 0 && cmd->cursorOffset > 0) {
+ getPrevCharSize(cmd->command, cmd->cursorOffset, &size, &width);
+ memmove(cmd->command + cmd->cursorOffset - size, cmd->command + cmd->cursorOffset,
+ cmd->commandSize - cmd->cursorOffset);
+ cmd->commandSize -= size;
+ cmd->cursorOffset -= size;
+ cmd->screenOffset -= width;
+ cmd->endOffset -= width;
+ }
+}
+
+// show screen
+void printScreen(TAOS * con, Command * cmd, SWords * match) {
+ // modify Command
+ if (firstMatchIndex == -1 || curMatchIndex == -1) {
+ // no match
+ return ;
+ }
+
+ // first tab press
+ const char * str = NULL;
+ int strLen = 0;
+
+ if (firstMatchIndex == curMatchIndex && lastWordBytes == -1) {
+ // first press tab
+ SWord * word = MATCH_WORD(match);
+ str = word->word + match->matchLen;
+ strLen = word->len - match->matchLen;
+ lastMatchIndex = firstMatchIndex;
+ lastWordBytes = word->len;
+ } else {
+ if (lastWordBytes == -1)
+ return ;
+ deleteCount(cmd, lastWordBytes);
+
+ SWord * word = MATCH_WORD(match);
+ str = word->word;
+ strLen = word->len;
+ // set current to last
+ lastMatchIndex = curMatchIndex;
+ lastWordBytes = word->len;
+ }
+
+ // insert new
+ insertChar(cmd, (char *)str, strLen);
+}
+
+
+// main key press tab , matched return true else false
+bool firstMatchCommand(TAOS * con, Command * cmd) {
+ // parse command
+ SWords* input = (SWords *)malloc(sizeof(SWords));
+ memset(input, 0, sizeof(SWords));
+ input->source = cmd->command;
+ input->source_len = cmd->commandSize;
+ parseCommand(input, false);
+
+  // if there are multiple matches, default to the first; pressing tab again switches to the next
+ curMatchIndex = -1;
+ lastMatchIndex = -1;
+ SWords * match = matchCommand(input, true);
+ if (match == NULL) {
+ // not match , nothing to do
+ freeCommand(input);
+ free(input);
+ return false;
+ }
+
+ // print to screen
+ printScreen(con, cmd, match);
+ freeCommand(input);
+ free(input);
+ return true;
+}
+
+// create input source
+void createInputFromFirst(SWords* input, SWords * firstMatch) {
+ //
+  //  on the next tab press the input context comes from firstMatch; the matched length is tracked in source_len
+ //
+ input->source = (char*)malloc(1024);
+ memset((void* )input->source, 0, 1024);
+
+ SWord * word = firstMatch->head;
+
+  // source_len = lengths of the fully matched words plus firstMatch->matchLen of the partially matched word
+ for (int i = 0; i < firstMatch->matchIndex && word; i++) {
+ // combine source from each word
+ strncpy(input->source + input->source_len, word->word, word->len);
+    strcat(input->source, " "); // append blank separator
+ input->source_len += word->len + 1; // 1 is blank length
+ // move next
+ word = word->next;
+ }
+  // append the partially matched last word
+ if (word) {
+ strncpy(input->source + input->source_len, word->word, firstMatch->matchLen);
+ input->source_len += firstMatch->matchLen;
+ }
+}
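+
+// For example (hypothetical values): if firstMatch is the pattern
+// "show databases ;" with matchIndex = 1 and matchLen = 4, the rebuilt input
+// becomes "show data", i.e. the fully matched words joined by blanks followed
+// by the first matchLen bytes of the partially matched word.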
+
+// called when the user presses Tab again (next match); returns true if matched, false otherwise
+bool nextMatchCommand(TAOS * con, Command * cmd, SWords * firstMatch) {
+ if (firstMatch == NULL || firstMatch->head == NULL) {
+ return false;
+ }
+ SWords* input = (SWords *)malloc(sizeof(SWords));
+ memset(input, 0, sizeof(SWords));
+
+ // create input from firstMatch
+ createInputFromFirst(input, firstMatch);
+
+ // parse input
+ parseCommand(input, false);
+
+ // if have many , default match first, if press tab again , switch to next
+ SWords * match = matchCommand(input, true);
+ if (match == NULL) {
+    // no match, reset all indexes and search again from the start
+ firstMatchIndex = -1;
+ curMatchIndex = -1;
+ match = matchCommand(input, false);
+ if (match == NULL) {
+ freeCommand(input);
+ if (input->source)
+ free(input->source);
+ free(input);
+ return false;
+ }
+ }
+
+ // print to screen
+ printScreen(con, cmd, match);
+
+ // free
+ if (input->source) {
+ free(input->source);
+ input->source = NULL;
+ }
+ freeCommand(input);
+ free(input);
+
+ return true;
+}
+
+// fill with type
+bool fillWithType(TAOS * con, Command * cmd, char* pre, int type) {
+ // get type
+ STire* tire = tires[type];
+ char* str = matchNextPrefix(tire, pre);
+ if (str == NULL) {
+ return false;
+ }
+
+ // need insert part string
+ char * part = str + strlen(pre);
+
+ // show
+ int count = strlen(part);
+ insertChar(cmd, part, count);
+  cntDel = count; // the next tab press deletes the characters appended now
+
+ free(str);
+ return true;
+}
+
+// fill table name
+bool fillTableName(TAOS * con, Command * cmd, char* pre) {
+ // search stable and table
+ char * str = tireSearchWord(WT_VAR_STABLE, pre);
+ if (str == NULL) {
+ str = tireSearchWord(WT_VAR_TABLE, pre);
+ if(str == NULL)
+ return false;
+ }
+
+ // need insert part string
+ char * part = str + strlen(pre);
+
+  // delete the characters appended by the previous autofill
+ if(cntDel > 0) {
+ deleteCount(cmd, cntDel);
+ cntDel = 0;
+ }
+
+ // show
+ int count = strlen(part);
+ insertChar(cmd, part, count);
+  cntDel = count; // the next tab press deletes the characters appended now
+
+ free(str);
+ return true;
+}
+
+//
+// find the last word of a sql select clause
+// examples:
+// 1 select cou -> press tab select count(
+// 2 select count(*),su -> select count(*), sum(
+// 3 select count(*), su -> select count(*), sum(
+//
+char * lastWord(char * p) {
+  // search backwards from the end for the nearest ' ' or ','
+ char * p1 = strrchr(p, ' ');
+ char * p2 = strrchr(p, ',');
+
+ if (p1 && p2) {
+ return MAX(p1, p2) + 1;
+ } else if (p1) {
+ return p1 + 1;
+ } else if(p2) {
+ return p2 + 1;
+ } else {
+ return p;
+ }
+}
+
+bool fieldsInputEnd(char* sql) {
+ // not in '()'
+ char* p1 = strrchr(sql, '(');
+ char* p2 = strrchr(sql, ')');
+ if (p1 && p2 == NULL) {
+ // like select count( ' '
+ return false;
+ } else if (p1 && p2 && p1 > p2) {
+ // like select sum(age), count( ' '
+ return false;
+ }
+
+ // not in ','
+ char * p3 = strrchr(sql, ',');
+ char * p = p3;
+ // like select ts, age,' '
+ if (p) {
+ ++p;
+    bool allBlank = true; // true if every char after the last ',' is blank
+    int cnt = 0; // number of blank chars after the last ','
+    char * plast = NULL; // position of the last blank
+ while(*p) {
+ if (*p == ' ') {
+ plast = p;
+ cnt ++;
+ } else {
+ allBlank = false;
+ }
+ ++p;
+ }
+
+    // only blanks after the last ',', still expecting a field
+ if(allBlank) {
+ return false;
+ }
+
+    // e.g. 'select count(*),sum(age) fr' should return true
+ if (plast && plast > p3 && p2 > p1 && plast > p2 && p1 > p3) {
+ return true;
+ }
+
+    // if the last char is not ' ', the field is not finished, e.g. 'select count(*), su' can still be completed to sum(
+ if(sql[strlen(sql)-1] != ' ' && cnt <= 1) {
+ return false;
+ }
+ }
+
+ char * p4 = strrchr(sql, ' ');
+ if(p4 == NULL) {
+ // only one word
+ return false;
+ }
+
+ return true;
+}
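+
+// A few illustrative cases for the checks above (the caller has already
+// stripped the leading "select "):
+//   fieldsInputEnd("count(")       -> false, still inside an unclosed '('
+//   fieldsInputEnd("ts, ")         -> false, only blanks after the last ','
+//   fieldsInputEnd("ts, voltage ") -> true, the field list looks complete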
+
+// need insert from
+bool needInsertFrom(char * sql, int len) {
+  // the last char must be a blank
+  if(sql[len-1] != ' ') {
+    // cannot insert the from keyword yet
+ return false;
+ }
+
+ // select fields input is end
+ if (!fieldsInputEnd(sql)) {
+ return false;
+ }
+
+ // can insert from keyword
+ return true;
+}
+
+bool matchSelectQuery(TAOS * con, Command * cmd) {
+  // if Tab is pressed repeatedly, delete the bytes appended by the previous autofill
+ if (cntDel > 0) {
+ deleteCount(cmd, cntDel);
+ cntDel = 0;
+ }
+
+ // match select ...
+ int len = cmd->commandSize;
+ char * p = cmd->command;
+
+ // remove prefix blank
+ while (p[0] == ' ' && len > 0) {
+ p++;
+ len--;
+ }
+
+  // length sanity check
+ if(len < 7 || len > 512) {
+ return false;
+ }
+
+  // must start with 'select '
+ if(strncasecmp(p, "select ", 7) != 0) {
+ // not select query clause
+ return false;
+ }
+ p += 7;
+ len -= 7;
+
+ char* ps = p = strndup(p, len);
+
+ // union all
+ char * p1;
+ do {
+ p1 = strstr(p, UNION_ALL);
+ if(p1) {
+ p = p1 + strlen(UNION_ALL);
+ }
+ } while (p1);
+
+ char * from = strstr(p, " from ");
+  // last word: may be an empty string or the first letters of a word
+ char * last = lastWord(p);
+ bool ret = false;
+ if (from == NULL) {
+ bool fieldEnd = fieldsInputEnd(p);
+    // if the field list input has ended, insert the from keyword
+ if (fieldEnd && p[len-1] == ' ') {
+ insertChar(cmd, "from", 4);
+ free(ps);
+ return true;
+ }
+
+    // fill with a function or keyword
+ if(fieldEnd) {
+      // fields are finished, match a keyword
+ ret = fillWithType(con, cmd, last, WT_VAR_KEYWORD);
+ } else {
+ ret = fillWithType(con, cmd, last, WT_VAR_FUNC);
+ }
+
+ free(ps);
+ return ret;
+ }
+
+ // have from
+ char * blank = strstr(from + 6, " ");
+ if (blank == NULL) {
+ // no table name, need fill
+ ret = fillTableName(con, cmd, last);
+ } else {
+ ret = fillWithType(con, cmd, last, WT_VAR_KEYWORD);
+ }
+
+ free(ps);
+ return ret;
+}
+
+// return true if the cursor is inside the create fields or tags area
+bool isCreateFieldsArea(char * p) {
+ char * left = strrchr(p, '(');
+ if (left == NULL) {
+ // like 'create table st'
+ return false;
+ }
+
+ char * right = strrchr(p, ')');
+ if(right == NULL) {
+ // like 'create table st( '
+ return true;
+ }
+
+ if (left > right) {
+ // like 'create table st( ts timestamp, age int) tags(area '
+ return true;
+ }
+
+ return false;
+}
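+
+// Illustrative inputs for the bracket check above (the caller has already
+// stripped the leading "create table "):
+//   "st"                                    -> false, no '(' yet
+//   "st(ts timestamp, "                     -> true, inside the field list
+//   "st(ts timestamp) tags(area "           -> true, inside the tags list
+//   "st(ts timestamp) tags(area binary(8))" -> false, both lists are closed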
+
+bool matchCreateTable(TAOS * con, Command * cmd) {
+  // if Tab is pressed repeatedly, delete the bytes appended by the previous autofill
+ if (cntDel > 0) {
+ deleteCount(cmd, cntDel);
+ cntDel = 0;
+ }
+
+  // match create table ...
+ int len = cmd->commandSize;
+ char * p = cmd->command;
+
+ // remove prefix blank
+ while (p[0] == ' ' && len > 0) {
+ p++;
+ len--;
+ }
+
+  // length sanity check
+ if(len < 7 || len > 1024) {
+ return false;
+ }
+
+  // must start with 'create table '
+  if(strncasecmp(p, "create table ", 13) != 0) {
+    // not a create table clause
+ return false;
+ }
+ p += 13;
+ len -= 13;
+
+ char* ps = strndup(p, len);
+ bool ret = false;
+ char * last = lastWord(ps);
+
+ // check in create fields or tags input area
+ if (isCreateFieldsArea(ps)) {
+ ret = fillWithType(con, cmd, last, WT_VAR_DATATYPE);
+ }
+
+ // tags
+ if (!ret) {
+    // if there is only one ')', the tags keyword can be inserted
+ char * p1 = strchr(ps, ')');
+ if (p1) {
+ if(strchr(p1 + 1, ')') == NULL && strstr(p1 + 1, "tags") == NULL) {
+ // can insert tags keyword
+ ret = fillWithType(con, cmd, last, WT_VAR_KEYTAGS);
+ }
+ }
+ }
+
+ free(ps);
+ return ret;
+}
+
+bool matchOther(TAOS * con, Command * cmd) {
+ int len = cmd->commandSize;
+ char* p = cmd->command;
+
+ if (p[len - 1] == '\\') {
+ // append '\G'
+ char a[] = "G;";
+ insertChar(cmd, a, 2);
+ return true;
+ }
+
+ return false;
+}
+
+
+// main key press tab
+void pressTabKey(TAOS * con, Command * cmd) {
+ // check
+ if (cmd->commandSize == 0) {
+ // empty
+ showHelp();
+ showOnScreen(cmd);
+ return ;
+ }
+
+ // save connection to global
+ varCon = con;
+ varCmd = cmd;
+ bool matched = false;
+
+ // manual match like create table st( ...
+ matched = matchCreateTable(con, cmd);
+ if (matched)
+ return ;
+
+ // shellCommands match
+ if (firstMatchIndex == -1) {
+ matched = firstMatchCommand(con, cmd);
+ } else {
+ matched = nextMatchCommand(con, cmd, &shellCommands[firstMatchIndex]);
+ }
+ if (matched)
+ return ;
+
+  // nothing matched above
+ // match other like '\G' ...
+ matched = matchOther(con, cmd);
+ if (matched)
+ return ;
+
+ // manual match like select * from ...
+ matched = matchSelectQuery(con, cmd);
+ if (matched)
+ return ;
+
+ return ;
+}
+
+// called when any key other than Tab is pressed
+void pressOtherKey(char c) {
+  // reset global state
+ firstMatchIndex = -1;
+ lastMatchIndex = -1;
+ curMatchIndex = -1;
+ lastWordBytes = -1;
+
+ // var names
+ cursorVar = -1;
+ varMode = false;
+ waitAutoFill = false;
+ cntDel = 0;
+
+ if (lastMatch) {
+ freeMatch(lastMatch);
+ lastMatch = NULL;
+ }
+}
+
+// copy the next word from p into name, return its length
+int getWordName(char* p, char * name, int nameLen) {
+ //remove prefix blank
+ while (*p == ' ') {
+ p++;
+ }
+
+  // copy the name
+ int i = 0;
+ while(p[i] != 0 && i < nameLen - 1) {
+ name[i] = p[i];
+ i++;
+ if(p[i] == ' ' || p[i] == ';'|| p[i] == '(') {
+ // name end
+ break;
+ }
+ }
+ name[i] = 0;
+
+ return i;
+}
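+
+// For example (hypothetical input): getWordName("  power_db;", name, 256)
+// copies "power_db" into name and returns 8; the terminating blank, ';' or
+// '(' is not copied.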
+
+// handle 'use db'; returns true if the sql starts with 'use'
+bool dealUseDB(char * sql) {
+ // check use keyword
+ if(strncasecmp(sql, "use ", 4) != 0) {
+ return false;
+ }
+
+ char db[256];
+ char *p = sql + 4;
+ if (getWordName(p, db, sizeof(db)) == 0) {
+ // no name , return
+ return true;
+ }
+
+  // dbName holds the previously opened db name
+ if (strcasecmp(db, dbName) == 0) {
+ // same , no need switch
+ return true;
+ }
+
+ // switch new db
+ pthread_mutex_lock(&tiresMutex);
+ // STABLE set null
+ STire* tire = tires[WT_VAR_STABLE];
+ tires[WT_VAR_STABLE] = NULL;
+ if(tire) {
+ freeTire(tire);
+ }
+ // TABLE set null
+ tire = tires[WT_VAR_TABLE];
+ tires[WT_VAR_TABLE] = NULL;
+ if(tire) {
+ freeTire(tire);
+ }
+ // save
+ strcpy(dbName, db);
+ pthread_mutex_unlock(&tiresMutex);
+
+ return true;
+}
+
+// handle a create statement; returns true if the sql starts with 'create'
+bool dealCreateCommand(char * sql) {
+ // check keyword
+ if(strncasecmp(sql, "create ", 7) != 0) {
+ return false;
+ }
+
+ char name[1024];
+ char *p = sql + 7;
+ if (getWordName(p, name, sizeof(name)) == 0) {
+ // no name , return
+ return true;
+ }
+
+ int type = -1;
+  // determine the object type from the first word
+ if (strcasecmp(name, "database") == 0) {
+ type = WT_VAR_DBNAME;
+ } else if (strcasecmp(name, "table") == 0) {
+ if(strstr(sql, " tags") != NULL && strstr(sql, " using ") == NULL)
+ type = WT_VAR_STABLE;
+ else
+ type = WT_VAR_TABLE;
+ } else if (strcasecmp(name, "user") == 0) {
+ type = WT_VAR_USERNAME;
+ } else {
+ // no match , return
+ return true;
+ }
+
+ // move next
+ p += strlen(name);
+
+  // get the next word, which is the object name
+ if (getWordName(p, name, sizeof(name)) == 0) {
+ // no name , return
+ return true;
+ }
+
+  // insert the new name into the corresponding tire
+  pthread_mutex_lock(&tiresMutex);
+ STire* tire = tires[type];
+ if(tire) {
+ insertWord(tire, name);
+ }
+ pthread_mutex_unlock(&tiresMutex);
+
+ return true;
+}
+
+// handle a drop statement; returns true if the sql starts with 'drop'
+bool dealDropCommand(char * sql) {
+ // check keyword
+ if(strncasecmp(sql, "drop ", 5) != 0) {
+ return false;
+ }
+
+ char name[1024];
+ char *p = sql + 5;
+ if (getWordName(p, name, sizeof(name)) == 0) {
+ // no name , return
+ return true;
+ }
+
+ int type = -1;
+  // determine the object type from the first word
+ if (strcasecmp(name, "database") == 0) {
+ type = WT_VAR_DBNAME;
+ } else if (strcasecmp(name, "table") == 0) {
+ type = WT_VAR_ALLTABLE;
+ } else if (strcasecmp(name, "dnode") == 0) {
+ type = WT_VAR_DNODEID;
+ } else if (strcasecmp(name, "user") == 0) {
+ type = WT_VAR_USERNAME;
+ } else {
+ // no match , return
+ return true;
+ }
+
+ // move next
+ p += strlen(name);
+
+  // get the next word, which is the object name
+ if (getWordName(p, name, sizeof(name)) == 0) {
+ // no name , return
+ return true;
+ }
+
+  // remove the dropped name from the corresponding tire
+  pthread_mutex_lock(&tiresMutex);
+ if(type == WT_VAR_ALLTABLE) {
+ bool del = false;
+ // del in stable
+ STire* tire = tires[WT_VAR_STABLE];
+ if(tire)
+ del = deleteWord(tire, name);
+ // del in table
+ if(!del) {
+ tire = tires[WT_VAR_TABLE];
+ if(tire)
+ del = deleteWord(tire, name);
+ }
+ } else {
+ // OTHER TYPE
+ STire* tire = tires[type];
+ if(tire)
+ deleteWord(tire, name);
+ }
+ pthread_mutex_unlock(&tiresMutex);
+
+ return true;
+}
+
+// callback into the autotab module after a shell sql statement is executed
+void callbackAutoTab(char* sqlstr, TAOS* pSql, bool usedb) {
+ char * sql = sqlstr;
+ // remove prefix blank
+ while (*sql == ' ') {
+ sql++;
+ }
+
+ if(dealUseDB(sql)) {
+ // change to new db
+ return ;
+ }
+
+  // a create command adds the new name to autotab
+ if(dealCreateCommand(sql)) {
+ return ;
+ }
+
+  // a drop command removes the name from autotab
+ if(dealDropCommand(sql)) {
+ return ;
+ }
+
+ return ;
+}
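+
+// End-to-end example (hypothetical statement): after the shell executes
+// "create table d1001 using meters tags(1)", callbackAutoTab() routes it to
+// dealCreateCommand(), which inserts "d1001" into the WT_VAR_TABLE tire
+// (provided that tire has already been loaded), so later tab completion can
+// offer it.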
diff --git a/src/kit/shell/src/shellCommand.c b/src/kit/shell/src/shellCommand.c
index d78e152dbdbc5c0144c65d50a32daadbce1cf534..2fe09691e3285c2e3031672404b0aa6ed7bac244 100644
--- a/src/kit/shell/src/shellCommand.c
+++ b/src/kit/shell/src/shellCommand.c
@@ -79,8 +79,11 @@ void insertChar(Command *cmd, char *c, int size) {
/* update the values */
cmd->commandSize += size;
cmd->cursorOffset += size;
- cmd->screenOffset += wcwidth(wc);
- cmd->endOffset += wcwidth(wc);
+ for (int i = 0; i < size; i++) {
+ mbtowc(&wc, c + i, size);
+ cmd->screenOffset += wcwidth(wc);
+ cmd->endOffset += wcwidth(wc);
+ }
showOnScreen(cmd);
}
@@ -179,6 +182,16 @@ void positionCursorHome(Command *cmd) {
}
}
+void positionCursorMiddle(Command *cmd) {
+ if (cmd->endOffset > 0) {
+ clearScreen(cmd->endOffset + prompt_size, cmd->screenOffset + prompt_size);
+ cmd->cursorOffset = cmd->commandSize/2;
+ cmd->screenOffset = cmd->endOffset/2;
+ showOnScreen(cmd);
+ }
+}
+
+
void positionCursorEnd(Command *cmd) {
assert(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset);
diff --git a/src/kit/shell/src/shellDarwin.c b/src/kit/shell/src/shellDarwin.c
index 7803113a0f98f0152c0ab6da948252064c33c70d..0108d92e8f6a73317bc510857e17d8c81a62d512 100644
--- a/src/kit/shell/src/shellDarwin.c
+++ b/src/kit/shell/src/shellDarwin.c
@@ -22,6 +22,7 @@
#include "tkey.h"
#include "tscLog.h"
+#include "shellAuto.h"
#define OPT_ABORT 1 /* �Cabort */
@@ -255,7 +256,12 @@ int32_t shellReadCommand(TAOS *con, char *command) {
utf8_array[k] = c;
}
insertChar(&cmd, utf8_array, count);
+ pressOtherKey(c);
+ } else if (c == TAB_KEY) {
+ // press TAB key
+ pressTabKey(con, &cmd);
} else if (c < '\033') {
+ pressOtherKey(c);
// Ctrl keys. TODO: Implement ctrl combinations
switch (c) {
case 1: // ctrl A
@@ -301,6 +307,9 @@ int32_t shellReadCommand(TAOS *con, char *command) {
case 21: // Ctrl + U
clearLineBefore(&cmd);
break;
+ case 23: // Ctrl + W;
+ positionCursorMiddle(&cmd);
+ break;
}
} else if (c == '\033') {
c = getchar();
@@ -377,9 +386,11 @@ int32_t shellReadCommand(TAOS *con, char *command) {
break;
}
} else if (c == 0x7f) {
+ pressOtherKey(c);
// press delete key
backspaceChar(&cmd);
} else {
+ pressOtherKey(c);
insertChar(&cmd, &c, 1);
}
}
diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c
index ee822120812784d5293b1aab348e0f2bcba599b3..b0c3f4934d2d7180413b2e5a3463b8eb9da8428a 100644
--- a/src/kit/shell/src/shellEngine.c
+++ b/src/kit/shell/src/shellEngine.c
@@ -27,6 +27,7 @@
#include "tglobal.h"
#include "tsclient.h"
#include "cJSON.h"
+#include "shellAuto.h"
#include
@@ -327,6 +328,12 @@ void shellRunCommandOnServer(TAOS *con, char command[]) {
fprintf(stdout, "Database changed.\n\n");
fflush(stdout);
+#ifndef WINDOWS
+ // call back auto tab module
+ callbackAutoTab(command, pSql, true);
+#endif
+
+
atomic_store_64(&result, 0);
freeResultWithRid(oresult);
return;
@@ -365,6 +372,11 @@ void shellRunCommandOnServer(TAOS *con, char command[]) {
int num_rows_affacted = taos_affected_rows(pSql);
et = taosGetTimestampUs();
printf("Query OK, %d of %d row(s) in database (%.6fs)\n", num_rows_affacted, num_rows_affacted, (et - st) / 1E6);
+
+#ifndef WINDOWS
+ // call auto tab
+ callbackAutoTab(command, pSql, false);
+#endif
}
printf("\n");
@@ -1169,12 +1181,12 @@ int parse_cloud_dsn() {
}
}
char *port = strstr(args.cloudHost, ":");
- if ((port == NULL) || (port + strlen(":")) == NULL) {
+ if (port == NULL) {
fprintf(stderr, "Invalid format in TDengine cloud dsn: %s\n", args.cloudDsn);
return 1;
}
char *token = strstr(port + strlen(":"), "?token=");
- if ((token == NULL) || (token + strlen("?token=")) == NULL ||
+ if ((token == NULL) ||
(strlen(token + strlen("?token=")) == 0)) {
fprintf(stderr, "Invalid format in TDengine cloud dsn: %s\n", args.cloudDsn);
return -1;
@@ -1632,4 +1644,4 @@ void wsclient_query(char *command) {
}
cJSON_Delete(query);
return;
-}
\ No newline at end of file
+}
diff --git a/src/kit/shell/src/shellImport.c b/src/kit/shell/src/shellImport.c
index b3a07b257cbfdd639d6834e7981fb10e89e43512..e74c31729f07925bb130fb52a293f493fcfc5ccb 100644
--- a/src/kit/shell/src/shellImport.c
+++ b/src/kit/shell/src/shellImport.c
@@ -93,8 +93,8 @@ static void shellCheckTablesSQLFile(const char *directoryName)
{
sprintf(shellTablesSQLFile, "%s/tables.sql", directoryName);
- struct stat fstat;
- if (stat(shellTablesSQLFile, &fstat) < 0) {
+ struct stat status;
+ if (stat(shellTablesSQLFile, &status) < 0) {
shellTablesSQLFile[0] = 0;
}
}
diff --git a/src/kit/shell/src/shellLinux.c b/src/kit/shell/src/shellLinux.c
index 863da2e1a727f073fe5eabf2b9d8d17f4e05c4b4..fd24a61c7d55c8a91ac9631a7263ca44b81d1606 100644
--- a/src/kit/shell/src/shellLinux.c
+++ b/src/kit/shell/src/shellLinux.c
@@ -20,6 +20,7 @@
#include "shellCommand.h"
#include "tkey.h"
#include "tulog.h"
+#include "shellAuto.h"
#define OPT_ABORT 1 /* �Cabort */
@@ -283,7 +284,12 @@ int32_t shellReadCommand(TAOS *con, char *command) {
utf8_array[k] = c;
}
insertChar(&cmd, utf8_array, count);
+ pressOtherKey(c);
+ } else if (c == TAB_KEY) {
+ // press TAB key
+ pressTabKey(con, &cmd);
} else if (c < '\033') {
+ pressOtherKey(c);
// Ctrl keys. TODO: Implement ctrl combinations
switch (c) {
case 1: // ctrl A
@@ -329,8 +335,12 @@ int32_t shellReadCommand(TAOS *con, char *command) {
case 21: // Ctrl + U;
clearLineBefore(&cmd);
break;
+ case 23: // Ctrl + W;
+ positionCursorMiddle(&cmd);
+ break;
}
} else if (c == '\033') {
+ pressOtherKey(c);
c = (char)getchar();
switch (c) {
case '[':
@@ -405,9 +415,11 @@ int32_t shellReadCommand(TAOS *con, char *command) {
break;
}
} else if (c == 0x7f) {
+ pressOtherKey(c);
// press delete key
backspaceChar(&cmd);
} else {
+ pressOtherKey(c);
insertChar(&cmd, &c, 1);
}
}
diff --git a/src/kit/shell/src/shellMain.c b/src/kit/shell/src/shellMain.c
index 149afc503e0f21b0bd347d9452a1811567bc5221..2dcf1052168e0de384ed666fa2f7c0044f5f618b 100644
--- a/src/kit/shell/src/shellMain.c
+++ b/src/kit/shell/src/shellMain.c
@@ -17,6 +17,8 @@
#include "shell.h"
#include "tconfig.h"
#include "tnettest.h"
+#include "shellCommand.h"
+#include "shellAuto.h"
pthread_t pid;
static tsem_t cancelSem;
@@ -162,10 +164,16 @@ int main(int argc, char* argv[]) {
/* Get grant information */
shellGetGrantInfo(args.con);
+#ifndef WINDOWS
+ shellAutoInit();
+#endif
/* Loop to query the input. */
while (1) {
pthread_create(&pid, NULL, shellLoopQuery, args.con);
pthread_join(pid, NULL);
}
+#ifndef WINDOWS
+ shellAutoExit();
+#endif
}
diff --git a/src/kit/shell/src/shellWindows.c b/src/kit/shell/src/shellWindows.c
index 9aab9f49cd1be5051f28e3c69887bd8766e8af75..0133caf997f60a17748a536371479c11b354888d 100644
--- a/src/kit/shell/src/shellWindows.c
+++ b/src/kit/shell/src/shellWindows.c
@@ -250,14 +250,15 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) {
if (args.cloudDsn == NULL) {
if (args.cloud) {
args.cloudDsn = getenv("TDENGINE_CLOUD_DSN");
- if (args.cloudDsn[strlen(args.cloudDsn) - 1] == '\"') {
- args.cloudDsn[strlen(args.cloudDsn) - 1] = '\0';
- }
- if (args.cloudDsn[0] == '\"') {
- args.cloudDsn += 1;
- }
if (args.cloudDsn == NULL) {
args.cloud = false;
+ } else {
+ if (args.cloudDsn[strlen(args.cloudDsn) - 1] == '\"') {
+ args.cloudDsn[strlen(args.cloudDsn) - 1] = '\0';
+ }
+ if (args.cloudDsn[0] == '\"') {
+ args.cloudDsn += 1;
+ }
}
}
} else {
@@ -434,4 +435,4 @@ int tcpConnect(char* host, int iport) {
return 1;
}
return 0;
-}
\ No newline at end of file
+}
diff --git a/src/kit/shell/src/tire.c b/src/kit/shell/src/tire.c
new file mode 100644
index 0000000000000000000000000000000000000000..b4dc7976bd53f11cccbac2f5db600edeeee861d5
--- /dev/null
+++ b/src/kit/shell/src/tire.c
@@ -0,0 +1,435 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define __USE_XOPEN
+
+#include "os.h"
+#include "tire.h"
+
+// ----------- interface -------------
+
+// create prefix search tree
+STire* createTire(char type) {
+ STire* tire = malloc(sizeof(STire));
+ memset(tire, 0, sizeof(STire));
+ tire->ref = 1; // init is 1
+ tire->type = type;
+ tire->root.d = (STireNode **)calloc(CHAR_CNT, sizeof(STireNode *));
+ return tire;
+}
+
+// free tire node
+void freeTireNode(STireNode* node) {
+ if (node == NULL)
+ return ;
+
+  // recursively free child nodes in array d
+ if(node->d) {
+ for (int i = 0; i < CHAR_CNT; i++) {
+ freeTireNode(node->d[i]);
+ }
+ tfree(node->d);
+ }
+
+ // free self
+ tfree(node);
+}
+
+// destroy prefix search tree
+void freeTire(STire* tire) {
+ // free nodes
+ for (int i = 0; i < CHAR_CNT; i++) {
+ freeTireNode(tire->root.d[i]);
+ }
+ tfree(tire->root.d);
+
+ // free from list
+ StrName * item = tire->head;
+ while (item) {
+ StrName * next = item->next;
+ // free string
+ tfree(item->name);
+ // free node
+ tfree(item);
+
+ // move next
+ item = next;
+ }
+ tire->head = tire->tail = NULL;
+
+ // free tire
+ tfree(tire);
+}
+
+// insert a new word to list
+bool insertToList(STire* tire, char* word) {
+ StrName * p = (StrName *)malloc(sizeof(StrName));
+ p->name = strdup(word);
+ p->next = NULL;
+
+ if(tire->head == NULL) {
+ tire->head = p;
+ tire->tail = p;
+ }else {
+ tire->tail->next = p;
+ tire->tail = p;
+ }
+
+ return true;
+}
+
+// insert a new word to tree
+bool insertToTree(STire* tire, char* word, int len) {
+ int m = 0;
+ STireNode ** nodes = tire->root.d;
+ for (int i = 0; i < len; i++) {
+ m = word[i] - FIRST_ASCII;
+    if (m < 0 || m >= CHAR_CNT) {
+ return false;
+ }
+
+    if (nodes[m] == NULL) {
+      // node does not exist yet, create it
+      STireNode* p = (STireNode* )tmalloc(sizeof(STireNode));
+      memset(p, 0, sizeof(STireNode));
+      nodes[m] = p;
+    }
+
+    if (i == len - 1) {
+      // last char: mark the end of a complete word (also when the node already existed)
+      nodes[m]->end = true;
+      break;
+    }
+
+ if (nodes[m]->d == NULL) {
+ // malloc d
+ nodes[m]->d = (STireNode **)calloc(CHAR_CNT, sizeof(STireNode *));
+ }
+
+ // move to next node
+ nodes = nodes[m]->d;
+ }
+
+ // add count
+ tire->count += 1;
+ return true;
+}
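+
+// A small sketch of the resulting layout (assuming FIRST_ASCII covers the
+// inserted characters): every node's child array d is indexed by
+// (ch - FIRST_ASCII), so inserting "su" and "sum" walks the same 's' -> 'u'
+// path; the 'u' node gets end set for "su" and the 'm' node below it gets
+// end set for "sum".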
+
+// insert a new word
+bool insertWord(STire* tire, char* word) {
+ int len = strlen(word);
+ if (len >= MAX_WORD_LEN) {
+ return false;
+ }
+
+ switch (tire->type) {
+ case TIRE_TREE:
+ return insertToTree(tire, word, len);
+ case TIRE_LIST:
+ return insertToList(tire, word);
+ default:
+ break;
+ }
+ return false;
+}
+
+// delete one word from list
+bool deleteFromList(STire* tire, char* word) {
+ StrName * item = tire->head;
+ while (item) {
+ if (strcmp(item->name, word) == 0) {
+      // found, mark it deleted by clearing the string
+ item->name[0] = 0;
+ }
+
+ // move next
+ item = item->next;
+ }
+ return true;
+}
+
+// delete one word from tree
+bool deleteFromTree(STire* tire, char* word, int len) {
+ int m = 0;
+ bool del = false;
+
+ STireNode** nodes = tire->root.d;
+ for (int i = 0; i < len; i++) {
+ m = word[i] - FIRST_ASCII;
+ if (m < 0 || m >= CHAR_CNT) {
+ return false;
+ }
+
+ if (nodes[m] == NULL) {
+      // not found
+ return false;
+ } else {
+      // node exists
+      if(i == len - 1) {
+        // last char: only clear the end flag, do not free the node
+ nodes[m]->end = false;
+ del = true;
+ break;
+ }
+ }
+
+ if(nodes[m]->d == NULL)
+ break;
+ // move to next node
+ nodes = nodes[m]->d;
+ }
+
+ // reduce count
+ if (del) {
+ tire->count -= 1;
+ }
+
+ return del;
+}
+
+// delete a word
+bool deleteWord(STire* tire, char* word) {
+ int len = strlen(word);
+ if (len >= MAX_WORD_LEN) {
+ return false;
+ }
+
+ switch (tire->type) {
+ case TIRE_TREE:
+ return deleteFromTree(tire, word, len);
+ case TIRE_LIST:
+ return deleteFromList(tire, word);
+ default:
+ break;
+ }
+ return false;
+}
+
+void addWordToMatch(SMatch* match, char* word){
+ // malloc new
+ SMatchNode* node = (SMatchNode* )tmalloc(sizeof(SMatchNode));
+ memset(node, 0, sizeof(SMatchNode));
+ node->word = strdup(word);
+
+ // append to match
+ if (match->head == NULL) {
+ match->head = match->tail = node;
+ } else {
+ match->tail->next = node;
+ match->tail = node;
+ }
+ match->count += 1;
+}
+
+// enum all words from node
+void enumAllWords(STireNode** nodes, char* prefix, SMatch* match) {
+ STireNode * c;
+ char word[MAX_WORD_LEN];
+ int len = strlen(prefix);
+ for (int i = 0; i < CHAR_CNT; i++) {
+ c = nodes[i];
+
+ if (c == NULL) {
+ // chain end node
+ continue;
+ } else {
+ // combine word string
+ memset(word, 0, sizeof(word));
+ strcpy(word, prefix);
+ word[len] = FIRST_ASCII + i; // append current char
+
+ // chain middle node
+ if (c->end) {
+ // have end flag
+ addWordToMatch(match, word);
+ }
+ // nested call next layer
+ if (c->d)
+ enumAllWords(c->d, word, match);
+ }
+ }
+}
+
+// match prefix from list
+void matchPrefixFromList(STire* tire, char* prefix, SMatch* match) {
+ StrName * item = tire->head;
+ int len = strlen(prefix);
+ while (item) {
+ if ( strncmp(item->name, prefix, len) == 0) {
+ // prefix matched
+ addWordToMatch(match, item->name);
+ }
+
+ // move next
+ item = item->next;
+ }
+}
+
+// match prefix words from the tree, appending every matched item to match
+void matchPrefixFromTree(STire* tire, char* prefix, SMatch* match) {
+ SMatch* root = match;
+ int m = 0;
+ STireNode* c = 0;
+ int len = strlen(prefix);
+ if (len >= MAX_WORD_LEN) {
+ return;
+ }
+
+ STireNode** nodes = tire->root.d;
+ for (int i = 0; i < len; i++) {
+ m = prefix[i] - FIRST_ASCII;
+    if (m < 0 || m >= CHAR_CNT) {
+ return;
+ }
+
+ // match
+ c = nodes[m];
+ if (c == NULL) {
+ // arrive end
+ break;
+ }
+
+ // previous items already matched
+ if (i == len - 1) {
+      // allocate a match if none was passed in
+ if (root == NULL) {
+ root = (SMatch* )tmalloc(sizeof(SMatch));
+ memset(root, 0, sizeof(SMatch));
+ strcpy(root->pre, prefix);
+ }
+
+      // the whole prefix matched; enumerate all words below this node
+ if (c->d)
+ enumAllWords(c->d, prefix, root);
+ } else {
+ // move to next node continue match
+ if(c->d == NULL)
+ break;
+ nodes = c->d;
+ }
+ }
+
+ // return
+ return ;
+}
+
+SMatch* matchPrefix(STire* tire, char* prefix, SMatch* match) {
+ if(match == NULL) {
+ match = (SMatch* )tmalloc(sizeof(SMatch));
+ memset(match, 0, sizeof(SMatch));
+ }
+
+ switch (tire->type) {
+  case TIRE_TREE:
+     matchPrefixFromTree(tire, prefix, match);
+     break;
+  case TIRE_LIST:
+     matchPrefixFromList(tire, prefix, match);
+     break;
+ default:
+ break;
+ }
+
+  // free and return NULL if nothing matched
+ if (match->count == 0) {
+ freeMatch(match);
+ match = NULL;
+ }
+
+ return match;
+}
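+
+// Minimal usage sketch (hypothetical words): after insertWord(tire, "show")
+// and insertWord(tire, "select"), matchPrefix(tire, "s", NULL) returns an
+// SMatch whose list holds both words; the caller releases it with freeMatch().
+// A NULL return means nothing matched.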
+
+
+// get all items from the tire list
+void enumFromList(STire* tire, SMatch* match) {
+ StrName * item = tire->head;
+ while (item) {
+ if (item->name[0] != 0) {
+      // not deleted
+ addWordToMatch(match, item->name);
+ }
+
+ // move next
+ item = item->next;
+ }
+}
+
+// get all items from the tire tree
+void enumFromTree(STire* tire, SMatch* match) {
+ char pre[2] ={0, 0};
+ STireNode* c;
+
+ // enum first layer
+ for (int i = 0; i < CHAR_CNT; i++) {
+ pre[0] = FIRST_ASCII + i;
+
+ // each node
+ c = tire->root.d[i];
+ if (c == NULL) {
+ // this branch no data
+ continue;
+ }
+
+ // this branch have data
+ if(c->end)
+ addWordToMatch(match, pre);
+ else
+ matchPrefix(tire, pre, match);
+ }
+}
+
+// get all items from the tire
+SMatch* enumAll(STire* tire) {
+ SMatch* match = (SMatch* )tmalloc(sizeof(SMatch));
+ memset(match, 0, sizeof(SMatch));
+
+ switch (tire->type) {
+  case TIRE_TREE:
+     enumFromTree(tire, match);
+     break;
+  case TIRE_LIST:
+     enumFromList(tire, match);
+     break;
+ default:
+ break;
+ }
+
+  // free and return NULL if nothing matched
+ if (match->count == 0) {
+ freeMatch(match);
+ match = NULL;
+ }
+
+ return match;
+}
+
+
+// free a chain of match nodes
+void freeMatchNode(SMatchNode* node) {
+ // first free next
+ if (node->next)
+ freeMatchNode(node->next);
+
+ // second free self
+ if (node->word)
+ free(node->word);
+ free(node);
+}
+
+// free match result
+void freeMatch(SMatch* match) {
+ // first free next
+ if (match->head) {
+ freeMatchNode(match->head);
+ }
+
+ // second free self
+ free(match);
+}
diff --git a/src/kit/taos-tools b/src/kit/taos-tools
index 5fdd694621fbb7bd2d6102ff4feaec92a7001038..f84cb6e51556d8030585128c2b252aa2a6453328 160000
--- a/src/kit/taos-tools
+++ b/src/kit/taos-tools
@@ -1 +1 @@
-Subproject commit 5fdd694621fbb7bd2d6102ff4feaec92a7001038
+Subproject commit f84cb6e51556d8030585128c2b252aa2a6453328
diff --git a/src/mnode/src/mnodeAcct.c b/src/mnode/src/mnodeAcct.c
index 64cfa28917cf6923230f0b7f70500c921c0d5a84..0ec330841caaa288c418bd272f776ca7563ebd63 100644
--- a/src/mnode/src/mnodeAcct.c
+++ b/src/mnode/src/mnodeAcct.c
@@ -215,7 +215,7 @@ static int32_t mnodeCreateRootAcct() {
taosEncryptPass((uint8_t *)TSDB_DEFAULT_PASS, strlen(TSDB_DEFAULT_PASS), pAcct->pass);
pAcct->cfg = (SAcctCfg){
.maxUsers = 128,
- .maxDbs = 128,
+ .maxDbs = INT16_MAX,
.maxTimeSeries = INT32_MAX,
.maxConnections = 1024,
.maxStreams = 1000,
diff --git a/src/mnode/src/mnodeCluster.c b/src/mnode/src/mnodeCluster.c
index 553e8446ab449cb3eab8bcc3c15bef8715fe978a..e8f7484fd13afc7117956f3b8bbf4cac5f17f0c3 100644
--- a/src/mnode/src/mnodeCluster.c
+++ b/src/mnode/src/mnodeCluster.c
@@ -145,8 +145,8 @@ static int32_t mnodeCreateCluster() {
SClusterObj *pCluster = malloc(sizeof(SClusterObj));
memset(pCluster, 0, sizeof(SClusterObj));
pCluster->createdTime = taosGetTimestampMs();
- bool getuid = taosGetSystemUid(pCluster->uid);
- if (!getuid) {
+ bool bGetuid = taosGetSystemUid(pCluster->uid);
+ if (!bGetuid) {
strcpy(pCluster->uid, "tdengine2.0");
mError("failed to get uid from system, set to default val %s", pCluster->uid);
} else {
@@ -260,4 +260,4 @@ int32_t mnodeCompactCluster() {
mInfo("end to compact cluster table...");
return 0;
-}
\ No newline at end of file
+}
diff --git a/src/mnode/src/mnodeMnode.c b/src/mnode/src/mnodeMnode.c
index 13dd06bcac733694475eee7be718afdc6c17466e..491d2e4b3603777466868736343c5b1135bd6bb0 100644
--- a/src/mnode/src/mnodeMnode.c
+++ b/src/mnode/src/mnodeMnode.c
@@ -210,7 +210,7 @@ void mnodeUpdateMnodeEpSet(SMInfos *pMinfos) {
mInfos = *pMinfos;
} else {
mInfo("vgId:1, update mnodes epSet, numOfMnodes:%d", mnodeGetMnodesNum());
- int32_t index = 0;
+ int32_t idx = 0;
void * pIter = NULL;
while (1) {
SMnodeObj *pMnode = NULL;
@@ -220,10 +220,10 @@ void mnodeUpdateMnodeEpSet(SMInfos *pMinfos) {
SDnodeObj *pDnode = mnodeGetDnode(pMnode->mnodeId);
if (pDnode != NULL) {
set = true;
- mInfos.mnodeInfos[index].mnodeId = pMnode->mnodeId;
- strcpy(mInfos.mnodeInfos[index].mnodeEp, pDnode->dnodeEp);
- if (pMnode->role == TAOS_SYNC_ROLE_MASTER) mInfos.inUse = index;
- index++;
+ mInfos.mnodeInfos[idx].mnodeId = pMnode->mnodeId;
+ strcpy(mInfos.mnodeInfos[idx].mnodeEp, pDnode->dnodeEp);
+ if (pMnode->role == TAOS_SYNC_ROLE_MASTER) mInfos.inUse = idx;
+ idx++;
} else {
set = false;
}
@@ -232,7 +232,7 @@ void mnodeUpdateMnodeEpSet(SMInfos *pMinfos) {
mnodeDecMnodeRef(pMnode);
}
- mInfos.mnodeNum = index;
+ mInfos.mnodeNum = idx;
if (mInfos.mnodeNum < sdbGetReplicaNum()) {
set = false;
mDebug("vgId:1, mnodes info not synced, current:%d syncCfgNum:%d", mInfos.mnodeNum, sdbGetReplicaNum());
@@ -251,23 +251,23 @@ void mnodeUpdateMnodeEpSet(SMInfos *pMinfos) {
tsMEpForPeer.numOfEps = tsMInfos.mnodeNum;
mInfo("vgId:1, mnodes epSet is set, num:%d inUse:%d", tsMInfos.mnodeNum, tsMInfos.inUse);
- for (int index = 0; index < mInfos.mnodeNum; ++index) {
- SMInfo *pInfo = &tsMInfos.mnodeInfos[index];
- taosGetFqdnPortFromEp(pInfo->mnodeEp, tsMEpForShell.fqdn[index], &tsMEpForShell.port[index]);
- taosGetFqdnPortFromEp(pInfo->mnodeEp, tsMEpForPeer.fqdn[index], &tsMEpForPeer.port[index]);
- tsMEpForPeer.port[index] = tsMEpForPeer.port[index] + TSDB_PORT_DNODEDNODE;
+ for (int idx = 0; idx < mInfos.mnodeNum; ++idx) {
+ SMInfo *pInfo = &tsMInfos.mnodeInfos[idx];
+ taosGetFqdnPortFromEp(pInfo->mnodeEp, tsMEpForShell.fqdn[idx], &tsMEpForShell.port[idx]);
+ taosGetFqdnPortFromEp(pInfo->mnodeEp, tsMEpForPeer.fqdn[idx], &tsMEpForPeer.port[idx]);
+ tsMEpForPeer.port[idx] = tsMEpForPeer.port[idx] + TSDB_PORT_DNODEDNODE;
- mInfo("vgId:1, mnode:%d, fqdn:%s shell:%u peer:%u", pInfo->mnodeId, tsMEpForShell.fqdn[index],
- tsMEpForShell.port[index], tsMEpForPeer.port[index]);
+ mInfo("vgId:1, mnode:%d, fqdn:%s shell:%u peer:%u", pInfo->mnodeId, tsMEpForShell.fqdn[idx],
+ tsMEpForShell.port[idx], tsMEpForPeer.port[idx]);
- tsMEpForShell.port[index] = htons(tsMEpForShell.port[index]);
- tsMEpForPeer.port[index] = htons(tsMEpForPeer.port[index]);
+ tsMEpForShell.port[idx] = htons(tsMEpForShell.port[idx]);
+ tsMEpForPeer.port[idx] = htons(tsMEpForPeer.port[idx]);
pInfo->mnodeId = htonl(pInfo->mnodeId);
}
} else {
mInfo("vgId:1, mnodes epSet not set, num:%d inUse:%d", tsMInfos.mnodeNum, tsMInfos.inUse);
- for (int index = 0; index < tsMInfos.mnodeNum; ++index) {
- mInfo("vgId:1, index:%d, ep:%s:%u", index, tsMEpForShell.fqdn[index], htons(tsMEpForShell.port[index]));
+ for (int idx = 0; idx < tsMInfos.mnodeNum; ++idx) {
+ mInfo("vgId:1, index:%d, ep:%s:%u", idx, tsMEpForShell.fqdn[idx], htons(tsMEpForShell.port[idx]));
}
}
@@ -603,4 +603,4 @@ int32_t mnodeCompactMnodes() {
mInfo("end to compact mnodes table...");
return 0;
-}
\ No newline at end of file
+}
diff --git a/src/mnode/src/mnodeSdb.c b/src/mnode/src/mnodeSdb.c
index 1e3057f27021e8874b96c0116dd0319fcd999da7..cb39c2ae2bc6d93bfe154dcb01535c398a98b6a9 100644
--- a/src/mnode/src/mnodeSdb.c
+++ b/src/mnode/src/mnodeSdb.c
@@ -331,7 +331,7 @@ int32_t sdbUpdateSync(void *pMnodes) {
mDebug("vgId:1, update sync config, pMnodes:%p", pMnodes);
SSyncCfg syncCfg = {0};
- int32_t index = 0;
+ int32_t idx = 0;
if (pMinfos == NULL) {
mDebug("vgId:1, mInfos not input, use mInfos in sdb, numOfMnodes:%d", syncCfg.replica);
@@ -342,29 +342,29 @@ int32_t sdbUpdateSync(void *pMnodes) {
pIter = mnodeGetNextMnode(pIter, &pMnode);
if (pMnode == NULL) break;
- syncCfg.nodeInfo[index].nodeId = pMnode->mnodeId;
+ syncCfg.nodeInfo[idx].nodeId = pMnode->mnodeId;
SDnodeObj *pDnode = mnodeGetDnode(pMnode->mnodeId);
if (pDnode != NULL) {
- syncCfg.nodeInfo[index].nodePort = pDnode->dnodePort + TSDB_PORT_SYNC;
- tstrncpy(syncCfg.nodeInfo[index].nodeFqdn, pDnode->dnodeFqdn, TSDB_FQDN_LEN);
- index++;
+ syncCfg.nodeInfo[idx].nodePort = pDnode->dnodePort + TSDB_PORT_SYNC;
+ tstrncpy(syncCfg.nodeInfo[idx].nodeFqdn, pDnode->dnodeFqdn, TSDB_FQDN_LEN);
+ idx++;
}
mnodeDecDnodeRef(pDnode);
mnodeDecMnodeRef(pMnode);
}
- syncCfg.replica = index;
+ syncCfg.replica = idx;
} else {
mDebug("vgId:1, mInfos input, numOfMnodes:%d", pMinfos->mnodeNum);
- for (index = 0; index < pMinfos->mnodeNum; ++index) {
- SMInfo *node = &pMinfos->mnodeInfos[index];
- syncCfg.nodeInfo[index].nodeId = node->mnodeId;
- taosGetFqdnPortFromEp(node->mnodeEp, syncCfg.nodeInfo[index].nodeFqdn, &syncCfg.nodeInfo[index].nodePort);
- syncCfg.nodeInfo[index].nodePort += TSDB_PORT_SYNC;
+ for (idx = 0; idx < pMinfos->mnodeNum; ++idx) {
+ SMInfo *node = &pMinfos->mnodeInfos[idx];
+ syncCfg.nodeInfo[idx].nodeId = node->mnodeId;
+ taosGetFqdnPortFromEp(node->mnodeEp, syncCfg.nodeInfo[idx].nodeFqdn, &syncCfg.nodeInfo[idx].nodePort);
+ syncCfg.nodeInfo[idx].nodePort += TSDB_PORT_SYNC;
}
- syncCfg.replica = index;
+ syncCfg.replica = idx;
mnodeUpdateMnodeEpSet(pMnodes);
}
diff --git a/src/os/src/detail/osDir.c b/src/os/src/detail/osDir.c
index 17c844ed863c227fe1178b7d99fee4a300a0b3e2..d867c80af4cbf8906a33295f82158f03f3380cf9 100644
--- a/src/os/src/detail/osDir.c
+++ b/src/os/src/detail/osDir.c
@@ -45,8 +45,8 @@ void taosRemoveDir(char *rootDir) {
uInfo("dir:%s is removed", rootDir);
}
-bool taosDirExist(const char* dirname) {
- return access(dirname, F_OK) == 0;
+bool taosDirExist(const char* dir) {
+ return access(dir, F_OK) == 0;
}
int32_t taosMkdirP(const char *dir, int keepLast) {
diff --git a/src/os/src/detail/osFile.c b/src/os/src/detail/osFile.c
index 910e6f15be48e9a757b87939dd95b3541967f9c3..6adcd1dae31bfa3c6365a73700b43020b4ab088a 100644
--- a/src/os/src/detail/osFile.c
+++ b/src/os/src/detail/osFile.c
@@ -44,11 +44,11 @@ void taosGetTmpfilePath(const char *fileNamePrefix, char *dstPath) {
strcat(tmpPath, "-%d-%s");
}
- char rand[32] = {0};
+ char rand_num[32] = {0};
- sprintf(rand, "%" PRIu64, atomic_add_fetch_64(&seqId, 1));
+ sprintf(rand_num, "%" PRIu64, atomic_add_fetch_64(&seqId, 1));
- snprintf(dstPath, PATH_MAX, tmpPath, getpid(), rand);
+ snprintf(dstPath, PATH_MAX, tmpPath, getpid(), rand_num);
}
#else
@@ -71,11 +71,11 @@ void taosGetTmpfilePath(const char *fileNamePrefix, char *dstPath) {
strcat(tmpPath, "-%d-%s");
}
- char rand[32] = {0};
+ char rand_num[32] = {0};
- sprintf(rand, "%" PRIu64, atomic_add_fetch_64(&seqId, 1));
+ sprintf(rand_num, "%" PRIu64, atomic_add_fetch_64(&seqId, 1));
- snprintf(dstPath, PATH_MAX, tmpPath, getpid(), rand);
+ snprintf(dstPath, PATH_MAX, tmpPath, getpid(), rand_num);
}
#endif
diff --git a/src/os/src/detail/osTime.c b/src/os/src/detail/osTime.c
index 1575b3100f78a17b9bbe9dcb29b971054c55b294..3590703695ea5f03f237d3dd36f7b1ebc8dc73d9 100644
--- a/src/os/src/detail/osTime.c
+++ b/src/os/src/detail/osTime.c
@@ -82,26 +82,26 @@ void deltaToUtcInitOnce() {
}
static int64_t parseFraction(char* str, char** end, int32_t timePrec);
-static int32_t parseTimeWithTz(char* timestr, int64_t* time, int32_t timePrec, char delim);
-static int32_t parseLocaltime(char* timestr, int64_t* time, int32_t timePrec, char delim, bool withDST);
+static int32_t parseTimeWithTz(char* timestr, int64_t* pTime, int32_t timePrec, char delim);
+static int32_t parseLocaltime(char* timestr, int64_t* pTime, int32_t timePrec, char delim, bool withDST);
static char* forwardToTimeStringEnd(char* str);
static bool checkTzPresent(char *str, int32_t len);
int32_t taosGetTimestampSec() { return (int32_t)time(NULL); }
-int32_t taosParseTime(char* timestr, int64_t* time, int32_t len, int32_t timePrec, int8_t day_light) {
+int32_t taosParseTime(char* timestr, int64_t* pTime, int32_t len, int32_t timePrec, int8_t day_light) {
/* parse datatime string in with tz */
if (strnchr(timestr, 'T', len, false) != NULL) {
if (checkTzPresent(timestr, len)) {
- return parseTimeWithTz(timestr, time, timePrec, 'T');
+ return parseTimeWithTz(timestr, pTime, timePrec, 'T');
} else {
- return parseLocaltime(timestr, time, timePrec, 'T', day_light);
+ return parseLocaltime(timestr, pTime, timePrec, 'T', day_light);
}
} else {
if (checkTzPresent(timestr, len)) {
- return parseTimeWithTz(timestr, time, timePrec, 0);
+ return parseTimeWithTz(timestr, pTime, timePrec, 0);
} else {
- return parseLocaltime(timestr, time, timePrec, 0, day_light);
+ return parseLocaltime(timestr, pTime, timePrec, 0, day_light);
}
}
}
@@ -121,8 +121,8 @@ bool checkTzPresent(char *str, int32_t len) {
}
-FORCE_INLINE int32_t taos_parse_time(char* timestr, int64_t* time, int32_t len, int32_t timePrec, int8_t day_light) {
- return taosParseTime(timestr, time, len, timePrec, day_light);
+FORCE_INLINE int32_t taos_parse_time(char* timestr, int64_t* pTime, int32_t len, int32_t timePrec, int8_t day_light) {
+ return taosParseTime(timestr, pTime, len, timePrec, day_light);
}
char* forwardToTimeStringEnd(char* str) {
@@ -243,7 +243,7 @@ int32_t parseTimezone(char* str, int64_t* tzOffset) {
* 2013-04-12T15:52:01+0800
* 2013-04-12T15:52:01.123+0800
*/
-int32_t parseTimeWithTz(char* timestr, int64_t* time, int32_t timePrec, char delim) {
+int32_t parseTimeWithTz(char* timestr, int64_t* pTime, int32_t timePrec, char delim) {
int64_t factor = (timePrec == TSDB_TIME_PRECISION_MILLI) ? 1000 :
(timePrec == TSDB_TIME_PRECISION_MICRO ? 1000000 : 1000000000);
@@ -277,14 +277,14 @@ int32_t parseTimeWithTz(char* timestr, int64_t* time, int32_t timePrec, char del
if ((str[0] == 'Z' || str[0] == 'z') && str[1] == '\0') {
/* utc time, no millisecond, return directly*/
- *time = seconds * factor;
+ *pTime = seconds * factor;
} else if (str[0] == '.') {
str += 1;
if ((fraction = parseFraction(str, &str, timePrec)) < 0) {
return -1;
}
- *time = seconds * factor + fraction;
+ *pTime = seconds * factor + fraction;
char seg = str[0];
if (seg != 'Z' && seg != 'z' && seg != '+' && seg != '-') {
@@ -297,18 +297,18 @@ int32_t parseTimeWithTz(char* timestr, int64_t* time, int32_t timePrec, char del
return -1;
}
- *time += tzOffset * factor;
+ *pTime += tzOffset * factor;
}
} else if (str[0] == '+' || str[0] == '-') {
- *time = seconds * factor + fraction;
+ *pTime = seconds * factor + fraction;
// parse the timezone
if (parseTimezone(str, &tzOffset) == -1) {
return -1;
}
- *time += tzOffset * factor;
+ *pTime += tzOffset * factor;
} else {
return -1;
}
@@ -316,8 +316,8 @@ int32_t parseTimeWithTz(char* timestr, int64_t* time, int32_t timePrec, char del
return 0;
}
-int32_t parseLocaltime(char* timestr, int64_t* time, int32_t timePrec, char delim, bool withDST) {
- *time = 0;
+int32_t parseLocaltime(char* timestr, int64_t* pTime, int32_t timePrec, char delim, bool withDST) {
+ *pTime = 0;
struct tm tm = {0};
if (withDST) {
tm.tm_isdst = -1;
@@ -365,65 +365,65 @@ int32_t parseLocaltime(char* timestr, int64_t* time, int32_t timePrec, char deli
int64_t factor = (timePrec == TSDB_TIME_PRECISION_MILLI) ? 1000 :
(timePrec == TSDB_TIME_PRECISION_MICRO ? 1000000 : 1000000000);
- *time = factor * seconds + fraction;
+ *pTime = factor * seconds + fraction;
return 0;
}
-int64_t convertTimePrecision(int64_t time, int32_t fromPrecision, int32_t toPrecision) {
+int64_t convertTimePrecision(int64_t timeStamp, int32_t fromPrecision, int32_t toPrecision) {
assert(fromPrecision == TSDB_TIME_PRECISION_MILLI ||
fromPrecision == TSDB_TIME_PRECISION_MICRO ||
fromPrecision == TSDB_TIME_PRECISION_NANO);
assert(toPrecision == TSDB_TIME_PRECISION_MILLI ||
toPrecision == TSDB_TIME_PRECISION_MICRO ||
toPrecision == TSDB_TIME_PRECISION_NANO);
- double tempResult = (double)time;
+ double tempResult = (double)timeStamp;
switch(fromPrecision) {
case TSDB_TIME_PRECISION_MILLI: {
switch (toPrecision) {
case TSDB_TIME_PRECISION_MILLI:
- return time;
+ return timeStamp;
case TSDB_TIME_PRECISION_MICRO:
tempResult *= 1000;
- time *= 1000;
+ timeStamp *= 1000;
goto end_;
case TSDB_TIME_PRECISION_NANO:
tempResult *= 1000000;
- time *= 1000000;
+ timeStamp *= 1000000;
goto end_;
}
} // end from milli
case TSDB_TIME_PRECISION_MICRO: {
switch (toPrecision) {
case TSDB_TIME_PRECISION_MILLI:
- return time / 1000;
+ return timeStamp / 1000;
case TSDB_TIME_PRECISION_MICRO:
- return time;
+ return timeStamp;
case TSDB_TIME_PRECISION_NANO:
tempResult *= 1000;
- time *= 1000;
+ timeStamp *= 1000;
goto end_;
}
} //end from micro
case TSDB_TIME_PRECISION_NANO: {
switch (toPrecision) {
case TSDB_TIME_PRECISION_MILLI:
- return time / 1000000;
+ return timeStamp / 1000000;
case TSDB_TIME_PRECISION_MICRO:
- return time / 1000;
+ return timeStamp / 1000;
case TSDB_TIME_PRECISION_NANO:
- return time;
+ return timeStamp;
}
} //end from nano
default: {
assert(0);
- return time; // only to pass windows compilation
+ return timeStamp; // only to pass windows compilation
}
} //end switch fromPrecision
end_:
if (tempResult >= (double)INT64_MAX) return INT64_MAX;
if (tempResult <= (double)INT64_MIN) return INT64_MIN + 1; // INT64_MIN means NULL
- return time;
+ return timeStamp;
}
static int32_t getDuration(int64_t val, char unit, int64_t* result, int32_t timePrecision) {
diff --git a/src/os/src/linux/osSystem.c b/src/os/src/linux/osSystem.c
index a82149dccb1f71e6fbdc2b62d066f04ff52c251d..d2d9d6d76cf093b1a4d3dcfcb48b3f745e6b7376 100644
--- a/src/os/src/linux/osSystem.c
+++ b/src/os/src/linux/osSystem.c
@@ -33,9 +33,9 @@ void* taosLoadDll(const char *filename) {
void* taosLoadSym(void* handle, char* name) {
void* sym = dlsym(handle, name);
- char* error = NULL;
+ char* err = NULL;
- if ((error = dlerror()) != NULL) {
+ if ((err = dlerror()) != NULL) {
uWarn("load sym:%s failed, error:%s", name, dlerror());
return NULL;
}
diff --git a/src/plugins/CMakeLists.txt b/src/plugins/CMakeLists.txt
index aeb7f538ce3c81dc619a124fe31bebd2902ea357..6733b604fa11a11f769b936fd511d3865145dbe1 100644
--- a/src/plugins/CMakeLists.txt
+++ b/src/plugins/CMakeLists.txt
@@ -63,7 +63,7 @@ ELSE ()
COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../inc CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-s -w -X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}"
COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../inc CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -o taosadapter-debug -ldflags "-X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}"
INSTALL_COMMAND
- COMMAND curl -sL https://github.com/upx/upx/releases/download/v3.96/upx-3.96-${PLATFORM_ARCH_STR}_linux.tar.xz -o upx.tar.xz && tar -xvJf upx.tar.xz -C ${CMAKE_BINARY_DIR} --strip-components 1 > /dev/null && ${CMAKE_BINARY_DIR}/upx taosadapter || :
+ COMMAND wget -c https://github.com/upx/upx/releases/download/v3.96/upx-3.96-${PLATFORM_ARCH_STR}_linux.tar.xz -O ${CMAKE_CURRENT_SOURCE_DIR}/upx.tar.xz && tar -xvJf ${CMAKE_CURRENT_SOURCE_DIR}/upx.tar.xz -C ${CMAKE_CURRENT_SOURCE_DIR} --strip-components 1 > /dev/null && ${CMAKE_CURRENT_SOURCE_DIR}/upx taosadapter || :
COMMAND cmake -E copy taosadapter ${CMAKE_BINARY_DIR}/build/bin
COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/
COMMAND cmake -E copy ./example/config/taosadapter.toml ${CMAKE_BINARY_DIR}/test/cfg/
diff --git a/src/plugins/monitor/src/monMain.c b/src/plugins/monitor/src/monMain.c
index bd0e0152058dc046857d91559170407a73f68d53..6af0d2bf0afe2c0fe2289f847a3f4da200cbc7ff 100644
--- a/src/plugins/monitor/src/monMain.c
+++ b/src/plugins/monitor/src/monMain.c
@@ -637,7 +637,7 @@ static int32_t monBuildMasterUptimeSql(char *sql) {
for (int i = 0; i < num_fields; ++i) {
if (strcmp(fields[i].name, "role") == 0) {
int32_t charLen = monGetRowElemCharLen(fields[i], (char *)row[i]);
- if (strncmp((char *)row[i], "master", charLen) == 0) {
+ if (strncmp((char *)row[i], "leader", charLen) == 0) {
if (strcmp(fields[i + 1].name, "role_time") == 0) {
int64_t now = taosGetTimestamp(TSDB_TIME_PRECISION_MILLI);
//master uptime in seconds
@@ -957,7 +957,7 @@ static int32_t monBuildDnodeVnodesSql(char *sql) {
for (int i = 0; i < num_fields; ++i) {
if (strcmp(fields[i].name, "status") == 0) {
int32_t charLen = monGetRowElemCharLen(fields[i], (char *)row[i]);
- if (strncmp((char *)row[i], "master", charLen) == 0) {
+ if (strncmp((char *)row[i], "leader", charLen) == 0) {
masterNum += 1;
}
}
@@ -992,7 +992,7 @@ static int32_t monBuildDnodeMnodeSql(char *sql) {
}
} else if (strcmp(fields[i].name, "role") == 0) {
charLen = monGetRowElemCharLen(fields[i], (char *)row[i]);
- if (strncmp((char *)row[i], "master", charLen) == 0) {
+ if (strncmp((char *)row[i], "leader", charLen) == 0) {
if (has_mnode_row) {
monHasMnodeMaster = true;
}
diff --git a/src/query/inc/qAggMain.h b/src/query/inc/qAggMain.h
index edd13ea96230a609685895cd3952205446f7bb8e..5fc940309c5b996304fe89afa20816826b61b784 100644
--- a/src/query/inc/qAggMain.h
+++ b/src/query/inc/qAggMain.h
@@ -90,8 +90,17 @@ extern "C" {
#define TSDB_FUNC_QSTOP 48
#define TSDB_FUNC_QDURATION 49
#define TSDB_FUNC_HYPERLOGLOG 50
+#define TSDB_FUNC_MIN_ROW 51
+#define TSDB_FUNC_MAX_ROW 52
+#define TSDB_FUNC_COL_DUMMY 53
-#define TSDB_FUNC_MAX_NUM 51
+#define TSDB_FUNC_MAX_NUM 54
+
+enum {
+ FUNC_NOT_VAL,
+ FUNC_MIN_ROW,
+ FUNC_MAX_ROW
+};
#define TSDB_FUNCSTATE_SO 0x1u // single output
#define TSDB_FUNCSTATE_MO 0x2u // dynamic number of output, not multinumber of output e.g., TOP/BOTTOM
@@ -217,6 +226,10 @@ typedef struct SQLFunctionCtx {
SHashObj **pModeSet; // for mode function
STimeWindow qWindow; // for _qstart/_qstop/_qduration column
int32_t allocRows; // rows allocated for output buffer
+ int16_t minRowIndex;
+ int16_t maxRowIndex;
+ int16_t minMaxRowType;
+ bool updateIndex; // whether to update the index after comparison
} SQLFunctionCtx;
typedef struct SAggFunctionInfo {
diff --git a/src/query/inc/qSqlparser.h b/src/query/inc/qSqlparser.h
index 8091a300b339839f4c0a91f54bc78b97b5bd858d..2a75d4d9923ef53d4b8f3eb5a91e8e3596ca1b08 100644
--- a/src/query/inc/qSqlparser.h
+++ b/src/query/inc/qSqlparser.h
@@ -97,9 +97,11 @@ typedef struct SIntervalVal {
SStrToken offset;
} SIntervalVal;
+typedef struct tSqlExpr tSqlExprTimestamp;
+
typedef struct SRangeVal {
- void *start;
- void *end;
+ tSqlExprTimestamp *start;
+ tSqlExprTimestamp *end;
} SRangeVal;
typedef struct SSessionWindowVal {
diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c
index 1aecac1b2f7f38e1590836c4b113abf762fb6fc7..d12bb28ab819cad0c041828ceeaa72ea924fe4f1 100644
--- a/src/query/src/qAggMain.c
+++ b/src/query/src/qAggMain.c
@@ -352,7 +352,7 @@ static uint64_t hllCountCnt(uint8_t *buckets) {
static uint8_t hllCountNum(void *ele, int32_t elesize, int32_t *buk) {
uint64_t hash = MurmurHash3_64(ele,elesize);
- int32_t index = hash & HLL_BUCKET_MASK;
+ int32_t idx = hash & HLL_BUCKET_MASK;
hash >>= HLL_BUCKET_BITS;
hash |= ((uint64_t)1<<HLL_DATA_BITS);
@@ ... @@
- int32_t index = 0;
- uint8_t count = hllCountNum(val,elesize,&index);
- uint8_t oldcount = pHLLInfo->buckets[index];
+ int32_t idx = 0;
+ uint8_t count = hllCountNum(val,elesize,&idx);
+ uint8_t oldcount = pHLLInfo->buckets[idx];
if (count > oldcount) {
- pHLLInfo->buckets[index] = count;
+ pHLLInfo->buckets[idx] = count;
}
}
GET_RES_INFO(pCtx)->numOfRes = 1;
@@ -419,8 +419,8 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI
assert(functionId != TSDB_FUNC_SCALAR_EXPR);
if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TS_DUMMY || functionId == TSDB_FUNC_TAG_DUMMY ||
- functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_TAGPRJ ||
- functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_INTERP)
+ functionId == TSDB_FUNC_COL_DUMMY || functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_PRJ ||
+ functionId == TSDB_FUNC_TAGPRJ || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_INTERP)
{
*type = (int16_t)dataType;
*bytes = dataBytes;
@@ -522,6 +522,12 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI
*bytes = (dataBytes + DATA_SET_FLAG_SIZE);
*interBytes = *bytes;
+ return TSDB_CODE_SUCCESS;
+ } else if (functionId == TSDB_FUNC_MIN_ROW || functionId == TSDB_FUNC_MAX_ROW) {
+ *type = TSDB_DATA_TYPE_BINARY;
+ *bytes = (dataBytes + DATA_SET_FLAG_SIZE);
+ *interBytes = *bytes;
+
return TSDB_CODE_SUCCESS;
} else if (functionId == TSDB_FUNC_SUM) {
*type = TSDB_DATA_TYPE_BINARY;
@@ -680,6 +686,10 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI
*type = (int16_t)dataType;
*bytes = dataBytes;
*interBytes = dataBytes + DATA_SET_FLAG_SIZE;
+ } else if (functionId == TSDB_FUNC_MIN_ROW || functionId == TSDB_FUNC_MAX_ROW) {
+ *type = (int16_t)dataType;
+ *bytes = dataBytes;
+ *interBytes = dataBytes + DATA_SET_FLAG_SIZE;
} else if (functionId == TSDB_FUNC_FIRST || functionId == TSDB_FUNC_LAST) {
*type = (int16_t)dataType;
*bytes = dataBytes;
@@ -1001,6 +1011,7 @@ int32_t noDataRequired(SQLFunctionCtx *pCtx, STimeWindow* w, int32_t colId) {
#define UPDATE_DATA(ctx, left, right, num, sign, k) \
do { \
if (((left) < (right)) ^ (sign)) { \
+ (ctx)->updateIndex = true; \
(left) = (right); \
DO_UPDATE_TAG_COLUMNS(ctx, k); \
(num) += 1; \
@@ -1017,13 +1028,27 @@ int32_t noDataRequired(SQLFunctionCtx *pCtx, STimeWindow* w, int32_t colId) {
} while (0)
#define LOOPCHECK_N(val, list, ctx, tsdbType, sign, num) \
+ int32_t updateCount = 0; \
for (int32_t i = 0; i < ((ctx)->size); ++i) { \
if ((ctx)->hasNull && isNull((char *)&(list)[i], tsdbType)) { \
continue; \
} \
TSKEY key = (ctx)->ptsList != NULL? GET_TS_DATA(ctx, i):0; \
+ (ctx)->updateIndex = false; \
UPDATE_DATA(ctx, val, (list)[i], num, sign, key); \
- }
+ if (!(ctx)->preAggVals.isSet) { \
+ if ((ctx)->updateIndex) { \
+ if (sign && (ctx)->preAggVals.statis.minIndex != i) { \
+ (ctx)->preAggVals.statis.minIndex = i; \
+ } \
+ if (!sign && (ctx)->preAggVals.statis.maxIndex != i) { \
+ (ctx)->preAggVals.statis.maxIndex = i; \
+ } \
+ updateCount++; \
+ } \
+ } \
+ } \
+ (ctx)->updateIndex = updateCount > 0 ? true : false;
#define TYPED_LOOPCHECK_N(type, data, list, ctx, tsdbType, sign, notNullElems) \
do { \
@@ -1363,14 +1388,14 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin,
}
void* tval = NULL;
- int16_t index = 0;
+ int16_t idx = 0;
if (isMin) {
tval = &pCtx->preAggVals.statis.min;
- index = pCtx->preAggVals.statis.minIndex;
+ idx = pCtx->preAggVals.statis.minIndex;
} else {
tval = &pCtx->preAggVals.statis.max;
- index = pCtx->preAggVals.statis.maxIndex;
+ idx = pCtx->preAggVals.statis.maxIndex;
}
TSKEY key = TSKEY_INITIAL_VAL;
@@ -1381,12 +1406,12 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin,
*
* The following codes of 3 lines will be removed later.
*/
-// if (index < 0 || index >= pCtx->size + pCtx->startOffset) {
-// index = 0;
+// if (idx < 0 || idx >= pCtx->size + pCtx->startOffset) {
+// idx = 0;
// }
- // the index is the original position, not the relative position
- key = pCtx->ptsList[index];
+ // the idx is the original position, not the relative position
+ key = pCtx->ptsList[idx];
}
if (IS_SIGNED_NUMERIC_TYPE(pCtx->inputType)) {
@@ -1406,6 +1431,7 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin,
#endif
if ((*data < val) ^ isMin) {
+ pCtx->updateIndex = true;
*data = (int32_t)val;
for (int32_t i = 0; i < (pCtx)->tagInfo.numOfTagCols; ++i) {
SQLFunctionCtx *__ctx = pCtx->tagInfo.pTagCtxList[i];
@@ -1465,13 +1491,17 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin,
} else if (pCtx->inputType == TSDB_DATA_TYPE_INT) {
int32_t *pData = p;
int32_t *retVal = (int32_t*) pOutput;
+ int32_t updateCount = 0;
for (int32_t i = 0; i < pCtx->size; ++i) {
if (pCtx->hasNull && isNull((const char*)&pData[i], pCtx->inputType)) {
continue;
}
+ pCtx->updateIndex = false;
+
if ((*retVal < pData[i]) ^ isMin) {
+ pCtx->updateIndex = true;
*retVal = pData[i];
if(tsList) {
TSKEY k = tsList[i];
@@ -1479,7 +1509,21 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin,
}
}
*notNullElems += 1;
+
+ if (!pCtx->preAggVals.isSet) {
+ if (pCtx->updateIndex) {
+ if (isMin && pCtx->preAggVals.statis.minIndex != i) {
+ pCtx->preAggVals.statis.minIndex = i;
+ }
+ if (!isMin && pCtx->preAggVals.statis.maxIndex != i) {
+ pCtx->preAggVals.statis.maxIndex = i;
+ }
+ updateCount++;
+ }
+ }
}
+
+ pCtx->updateIndex = updateCount > 0 ? true : false;
#if defined(_DEBUG_VIEW)
qDebug("max value updated:%d", *retVal);
#endif
@@ -1737,6 +1781,152 @@ static void max_func_merge(SQLFunctionCtx *pCtx) {
}
}
+static bool min_row_func_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResultInfo) {
+ if (!function_setup(pCtx, pResultInfo)) {
+ return false; // already initialized; no need to set up again
+ }
+
+ GET_TRUE_DATA_TYPE();
+
+ switch (type) {
+ case TSDB_DATA_TYPE_TINYINT:
+ *((int8_t *)pCtx->pOutput) = INT8_MAX;
+ break;
+ case TSDB_DATA_TYPE_UTINYINT:
+ *(uint8_t *) pCtx->pOutput = UINT8_MAX;
+ break;
+ case TSDB_DATA_TYPE_SMALLINT:
+ *((int16_t *)pCtx->pOutput) = INT16_MAX;
+ break;
+ case TSDB_DATA_TYPE_USMALLINT:
+ *((uint16_t *)pCtx->pOutput) = UINT16_MAX;
+ break;
+ case TSDB_DATA_TYPE_INT:
+ *((int32_t *)pCtx->pOutput) = INT32_MAX;
+ break;
+ case TSDB_DATA_TYPE_UINT:
+ *((uint32_t *)pCtx->pOutput) = UINT32_MAX;
+ break;
+ case TSDB_DATA_TYPE_BIGINT:
+ *((int64_t *)pCtx->pOutput) = INT64_MAX;
+ break;
+ case TSDB_DATA_TYPE_UBIGINT:
+ *((uint64_t *)pCtx->pOutput) = UINT64_MAX;
+ break;
+ case TSDB_DATA_TYPE_FLOAT:
+ *((float *)pCtx->pOutput) = FLT_MAX;
+ break;
+ case TSDB_DATA_TYPE_DOUBLE:
+ SET_DOUBLE_VAL(((double *)pCtx->pOutput), DBL_MAX);
+ break;
+ default:
+ qError("illegal data type:%d in min_row query", pCtx->inputType);
+ }
+
+ return true;
+}
+
+static bool max_row_func_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResultInfo) {
+ if (!function_setup(pCtx, pResultInfo)) {
+ return false; // already initialized; no need to set up again
+ }
+
+ GET_TRUE_DATA_TYPE();
+
+ switch (type) {
+ case TSDB_DATA_TYPE_TINYINT:
+ *((int8_t *)pCtx->pOutput) = INT8_MIN;
+ break;
+ case TSDB_DATA_TYPE_UTINYINT:
+ *((uint8_t *)pCtx->pOutput) = 0;
+ break;
+ case TSDB_DATA_TYPE_SMALLINT:
+ *((int16_t *)pCtx->pOutput) = INT16_MIN;
+ break;
+ case TSDB_DATA_TYPE_USMALLINT:
+ *((uint16_t *)pCtx->pOutput) = 0;
+ break;
+ case TSDB_DATA_TYPE_INT:
+ *((int32_t *)pCtx->pOutput) = INT32_MIN;
+ break;
+ case TSDB_DATA_TYPE_UINT:
+ *((uint32_t *)pCtx->pOutput) = 0;
+ break;
+ case TSDB_DATA_TYPE_BIGINT:
+ *((int64_t *)pCtx->pOutput) = INT64_MIN;
+ break;
+ case TSDB_DATA_TYPE_UBIGINT:
+ *((uint64_t *)pCtx->pOutput) = 0;
+ break;
+ case TSDB_DATA_TYPE_FLOAT:
+ *((float *)pCtx->pOutput) = -FLT_MAX;
+ break;
+ case TSDB_DATA_TYPE_DOUBLE:
+ SET_DOUBLE_VAL(((double *)pCtx->pOutput), -DBL_MAX);
+ break;
+ default:
+ qError("illegal data type:%d in max_row query", pCtx->inputType);
+ }
+
+ return true;
+}
+
+static void min_row_function(SQLFunctionCtx *pCtx) {
+ int32_t notNullElems = 0;
+ minMax_function(pCtx, pCtx->pOutput, 1, &notNullElems);
+
+ SET_VAL(pCtx, notNullElems, 1);
+
+ if (notNullElems > 0) {
+ SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
+ pResInfo->hasResult = DATA_SET_FLAG;
+
+ // set the flag for super table query
+ if (pCtx->stableQuery) {
+ *(pCtx->pOutput + pCtx->inputBytes) = DATA_SET_FLAG;
+ }
+ }
+}
+
+static void max_row_function(SQLFunctionCtx *pCtx) {
+ int32_t notNullElems = 0;
+ minMax_function(pCtx, pCtx->pOutput, 0, &notNullElems);
+
+ SET_VAL(pCtx, notNullElems, 1);
+
+ if (notNullElems > 0) {
+ SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
+ pResInfo->hasResult = DATA_SET_FLAG;
+
+ // set the flag for super table query
+ if (pCtx->stableQuery) {
+ *(pCtx->pOutput + pCtx->inputBytes) = DATA_SET_FLAG;
+ }
+ }
+}
+
+static void min_row_func_merge(SQLFunctionCtx *pCtx) {
+ int32_t notNullElems = minmax_merge_impl(pCtx, pCtx->outputBytes, pCtx->pOutput, 1);
+
+ SET_VAL(pCtx, notNullElems, 1);
+
+ SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
+ if (notNullElems > 0) {
+ pResInfo->hasResult = DATA_SET_FLAG;
+ }
+}
+
+static void max_row_func_merge(SQLFunctionCtx *pCtx) {
+ int32_t numOfElem = minmax_merge_impl(pCtx, pCtx->outputBytes, pCtx->pOutput, 0);
+
+ SET_VAL(pCtx, numOfElem, 1);
+
+ SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
+ if (numOfElem > 0) {
+ pResInfo->hasResult = DATA_SET_FLAG;
+ }
+}
+
#define LOOP_STDDEV_IMPL(type, r, d, ctx, delta, _type, num) \
for (int32_t i = 0; i < (ctx)->size; ++i) { \
if ((ctx)->hasNull && isNull((char *)&((type *)d)[i], (_type))) { \
@@ -2065,15 +2255,15 @@ static void first_function(SQLFunctionCtx *pCtx) {
SET_VAL(pCtx, notNullElems, 1);
}
-static void first_data_assign_impl(SQLFunctionCtx *pCtx, char *pData, int32_t index) {
+static void first_data_assign_impl(SQLFunctionCtx *pCtx, char *pData, int32_t idx) {
int64_t *timestamp = GET_TS_LIST(pCtx);
SFirstLastInfo *pInfo = (SFirstLastInfo *)(pCtx->pOutput + pCtx->inputBytes);
- if (pInfo->hasResult != DATA_SET_FLAG || timestamp[index] < pInfo->ts) {
+ if (pInfo->hasResult != DATA_SET_FLAG || timestamp[idx] < pInfo->ts) {
memcpy(pCtx->pOutput, pData, pCtx->inputBytes);
pInfo->hasResult = DATA_SET_FLAG;
- pInfo->ts = timestamp[index];
+ pInfo->ts = timestamp[idx];
DO_UPDATE_TAG_COLUMNS(pCtx, pInfo->ts);
}
@@ -2203,19 +2393,19 @@ static void last_function(SQLFunctionCtx *pCtx) {
SET_VAL(pCtx, notNullElems, 1);
}
-static void last_data_assign_impl(SQLFunctionCtx *pCtx, char *pData, int32_t index) {
+static void last_data_assign_impl(SQLFunctionCtx *pCtx, char *pData, int32_t idx) {
int64_t *timestamp = GET_TS_LIST(pCtx);
SFirstLastInfo *pInfo = (SFirstLastInfo *)(pCtx->pOutput + pCtx->inputBytes);
- if (pInfo->hasResult != DATA_SET_FLAG || pInfo->ts < timestamp[index]) {
+ if (pInfo->hasResult != DATA_SET_FLAG || pInfo->ts < timestamp[idx]) {
#if defined(_DEBUG_VIEW)
- qDebug("assign index:%d, ts:%" PRId64 ", val:%d, ", index, timestamp[index], *(int32_t *)pData);
+ qDebug("assign index:%d, ts:%" PRId64 ", val:%d, ", idx, timestamp[idx], *(int32_t *)pData);
#endif
memcpy(pCtx->pOutput, pData, pCtx->inputBytes);
pInfo->hasResult = DATA_SET_FLAG;
- pInfo->ts = timestamp[index];
+ pInfo->ts = timestamp[idx];
DO_UPDATE_TAG_COLUMNS(pCtx, pInfo->ts);
}
@@ -3188,12 +3378,12 @@ static bool leastsquares_function_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo
return true;
}
-#define LEASTSQR_CAL(p, x, y, index, step) \
+#define LEASTSQR_CAL(p, x, y, idx, step) \
do { \
(p)[0][0] += (double)(x) * (x); \
(p)[0][1] += (double)(x); \
- (p)[0][2] += (double)(x) * (y)[index]; \
- (p)[1][2] += (y)[index]; \
+ (p)[0][2] += (double)(x) * (y)[idx]; \
+ (p)[1][2] += (y)[idx]; \
(x) += step; \
} while (0)
@@ -3412,6 +3602,70 @@ static void copy_function(SQLFunctionCtx *pCtx) {
assignVal(pCtx->pOutput, pData, pCtx->inputBytes, pCtx->inputType);
}
+static char *get_data_by_offset(char *src, int16_t inputType, int32_t inputBytes, int32_t offset) {
+ char *res = NULL;
+
+ switch (inputType) {
+ case TSDB_DATA_TYPE_BOOL:
+ case TSDB_DATA_TYPE_TINYINT:
+ case TSDB_DATA_TYPE_UTINYINT:
+ res = (char *) ((int8_t *) src + offset);
+ break;
+ case TSDB_DATA_TYPE_SMALLINT:
+ case TSDB_DATA_TYPE_USMALLINT:
+ res = (char *) ((int16_t *) src + offset);
+ break;
+ case TSDB_DATA_TYPE_INT:
+ case TSDB_DATA_TYPE_UINT:
+ res = (char *) ((int32_t *) src + offset);
+ break;
+ case TSDB_DATA_TYPE_FLOAT:
+ res = (char *) ((float *) src + offset);
+ break;
+ case TSDB_DATA_TYPE_DOUBLE:
+ res = (char *) ((double *) src + offset);
+ break;
+ case TSDB_DATA_TYPE_BIGINT:
+ case TSDB_DATA_TYPE_UBIGINT:
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ res = (char *) ((int64_t *) src + offset);
+ break;
+ case TSDB_DATA_TYPE_BINARY:
+ case TSDB_DATA_TYPE_NCHAR:
+ res = src + offset * inputBytes;
+ break;
+ default: {
+ res = src;
+ }
+ }
+
+ return res;
+}
+
+static void row_copy_function(SQLFunctionCtx *pCtx) {
+ int16_t index;
+
+ if (pCtx->minMaxRowType == FUNC_NOT_VAL || !pCtx->updateIndex) {
+ return;
+ }
+
+ if (pCtx->minMaxRowType == FUNC_MIN_ROW) {
+ index = pCtx->minRowIndex;
+ } else {
+ index = pCtx->maxRowIndex;
+ }
+
+ if (index < 0) {
+ return;
+ }
+
+ SET_VAL(pCtx, pCtx->size, 1);
+
+ char *pData = GET_INPUT_DATA_LIST(pCtx);
+ pData = get_data_by_offset(pData, pCtx->inputType, pCtx->inputBytes, index);
+ assignVal(pCtx->pOutput, pData, pCtx->inputBytes, pCtx->inputType);
+}
+
static void full_copy_function(SQLFunctionCtx *pCtx) {
copy_function(pCtx);
@@ -3831,16 +4085,16 @@ static void diff_function(SQLFunctionCtx *pCtx) {
char *getScalarExprColumnData(void *param, const char* name, int32_t colId) {
SScalarExprSupport *pSupport = (SScalarExprSupport *)param;
- int32_t index = -1;
+ int32_t idx = -1;
for (int32_t i = 0; i < pSupport->numOfCols; ++i) {
if (colId == pSupport->colList[i].colId) {
- index = i;
+ idx = i;
break;
}
}
- assert(index >= 0);
- return pSupport->data[index] + pSupport->offset * pSupport->colList[index].bytes;
+ assert(idx >= 0);
+ return pSupport->data[idx] + pSupport->offset * pSupport->colList[idx].bytes;
}
static void scalar_expr_function(SQLFunctionCtx *pCtx) {
@@ -4049,14 +4303,14 @@ static double twa_get_area(SPoint1 s, SPoint1 e) {
return val;
}
-static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t index, int32_t size) {
+static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t idx, int32_t size) {
int32_t notNullElems = 0;
SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
STwaInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo);
TSKEY *tsList = GET_TS_LIST(pCtx);
- int32_t i = index;
+ int32_t i = idx;
int32_t step = GET_FORWARD_DIRECTION_FACTOR(pCtx->order);
SPoint1* last = &pInfo->p;
@@ -4067,7 +4321,7 @@ static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t index, int32_t si
assert(last->key == INT64_MIN);
last->key = tsList[i];
- GET_TYPED_DATA(last->val, double, pCtx->inputType, GET_INPUT_DATA(pCtx, index));
+ GET_TYPED_DATA(last->val, double, pCtx->inputType, GET_INPUT_DATA(pCtx, idx));
pInfo->dOutput += twa_get_area(pCtx->start, *last);
@@ -4077,7 +4331,7 @@ static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t index, int32_t si
i += step;
} else if (pInfo->p.key == INT64_MIN) {
last->key = tsList[i];
- GET_TYPED_DATA(last->val, double, pCtx->inputType, GET_INPUT_DATA(pCtx, index));
+ GET_TYPED_DATA(last->val, double, pCtx->inputType, GET_INPUT_DATA(pCtx, idx));
pInfo->hasResult = DATA_SET_FLAG;
pInfo->win.skey = last->key;
@@ -5016,13 +5270,13 @@ static void mavg_function(SQLFunctionCtx *pCtx) {
//////////////////////////////////////////////////////////////////////////////////
// Sample function with reservoir sampling algorithm
-static void assignResultSample(SQLFunctionCtx *pCtx, SSampleFuncInfo *pInfo, int32_t index, int64_t ts, void *pData, uint16_t type, int16_t bytes, char *inputTags) {
- assignVal(pInfo->values + index*bytes, pData, bytes, type);
- *(pInfo->timeStamps + index) = ts;
+static void assignResultSample(SQLFunctionCtx *pCtx, SSampleFuncInfo *pInfo, int32_t idx, int64_t ts, void *pData, uint16_t type, int16_t bytes, char *inputTags) {
+ assignVal(pInfo->values + idx*bytes, pData, bytes, type);
+ *(pInfo->timeStamps + idx) = ts;
SExtTagsInfo* pTagInfo = &pCtx->tagInfo;
int32_t posTag = 0;
- char* tags = pInfo->taglists + index*pTagInfo->tagsLen;
+ char* tags = pInfo->taglists + idx*pTagInfo->tagsLen;
if (pCtx->currentStage == MERGE_STAGE) {
assert(inputTags != NULL);
memcpy(tags, inputTags, (size_t)pTagInfo->tagsLen);
@@ -6053,8 +6307,8 @@ int32_t functionCompatList[] = {
1, 1, 1, 1, -1, 1, 1, 1, 5, 1, 1,
// tid_tag, deriv, csum, mavg, sample, block_info, elapsed, histogram, unique, mode, tail
6, 8, -1, -1, -1, 7, 1, -1, -1, 1, -1,
- // stateCount, stateDuration, wstart, wstop, wduration, qstart, qstop, qduration, hyperloglog
- 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ // stateCount, stateDuration, wstart, wstop, wduration, qstart, qstop, qduration, hyperloglog, min_row, max_row
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
};
SAggFunctionInfo aAggs[TSDB_FUNC_MAX_NUM] = {{
@@ -6671,5 +6925,41 @@ SAggFunctionInfo aAggs[TSDB_FUNC_MAX_NUM] = {{
hll_func_finalizer,
hll_func_merge,
dataBlockRequired,
+ },
+ {
+ // 51
+ "min_row",
+ TSDB_FUNC_MIN_ROW,
+ TSDB_FUNC_MIN_ROW,
+ TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_SELECTIVITY,
+ min_row_func_setup,
+ min_row_function,
+ function_finalizer,
+ min_row_func_merge,
+ dataBlockRequired,
+ },
+ {
+ // 52
+ "max_row",
+ TSDB_FUNC_MAX_ROW,
+ TSDB_FUNC_MAX_ROW,
+ TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_SELECTIVITY,
+ max_row_func_setup,
+ max_row_function,
+ function_finalizer,
+ max_row_func_merge,
+ dataBlockRequired,
+ },
+ {
+ // 53
+ "col_dummy",
+ TSDB_FUNC_COL_DUMMY,
+ TSDB_FUNC_COL_DUMMY,
+ TSDB_BASE_FUNC_SO,
+ function_setup,
+ row_copy_function,
+ doFinalizer,
+ copy_function,
+ noDataRequired,
}
};
diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c
index 7d4b0b7edba8f7664f5c797a41241dbe7b2fe4b2..d12cf8d03fa20d6a518e9fcae263a4f6052f8177 100644
--- a/src/query/src/qExecutor.c
+++ b/src/query/src/qExecutor.c
@@ -413,7 +413,7 @@ static bool isSelectivityWithTagsQuery(SQLFunctionCtx *pCtx, int32_t numOfOutput
continue;
}
- if (functId == TSDB_FUNC_TAG_DUMMY || functId == TSDB_FUNC_TS_DUMMY) {
+ if (functId == TSDB_FUNC_TAG_DUMMY || functId == TSDB_FUNC_TS_DUMMY || functId == TSDB_FUNC_COL_DUMMY) {
hasTags = true;
continue;
}
@@ -437,7 +437,7 @@ static bool isScalarWithTagsQuery(SQLFunctionCtx *pCtx, int32_t numOfOutput) {
continue;
}
- if (functId == TSDB_FUNC_TAG_DUMMY || functId == TSDB_FUNC_TS_DUMMY) {
+ if (functId == TSDB_FUNC_TAG_DUMMY || functId == TSDB_FUNC_TS_DUMMY || functId == TSDB_FUNC_COL_DUMMY) {
hasTags = true;
continue;
}
@@ -519,9 +519,9 @@ static SResultRow* doSetResultOutBufByKey(SQueryRuntimeEnv* pRuntimeEnv, SResult
pResultRowInfo->curPos = 0;
} else { // check if current pResultRowInfo contains the existed pResultRow
SET_RES_EXT_WINDOW_KEY(pRuntimeEnv->keyBuf, pData, bytes, tid, pResultRowInfo);
- int64_t* index = taosHashGet(pRuntimeEnv->pResultRowListSet, pRuntimeEnv->keyBuf, GET_RES_EXT_WINDOW_KEY_LEN(bytes));
- if (index != NULL) {
- pResultRowInfo->curPos = (int32_t) *index;
+ int64_t* idx = taosHashGet(pRuntimeEnv->pResultRowListSet, pRuntimeEnv->keyBuf, GET_RES_EXT_WINDOW_KEY_LEN(bytes));
+ if (idx != NULL) {
+ pResultRowInfo->curPos = (int32_t) *idx;
existed = true;
} else {
existed = false;
@@ -557,9 +557,9 @@ static SResultRow* doSetResultOutBufByKey(SQueryRuntimeEnv* pRuntimeEnv, SResult
pResultRowInfo->curPos = pResultRowInfo->size;
pResultRowInfo->pResult[pResultRowInfo->size++] = pResult;
- int64_t index = pResultRowInfo->curPos;
+ int64_t idx = pResultRowInfo->curPos;
SET_RES_EXT_WINDOW_KEY(pRuntimeEnv->keyBuf, pData, bytes, tid, pResultRowInfo);
- taosHashPut(pRuntimeEnv->pResultRowListSet, pRuntimeEnv->keyBuf, GET_RES_EXT_WINDOW_KEY_LEN(bytes), &index, POINTER_BYTES);
+ taosHashPut(pRuntimeEnv->pResultRowListSet, pRuntimeEnv->keyBuf, GET_RES_EXT_WINDOW_KEY_LEN(bytes), &idx, POINTER_BYTES);
}
// too many time window in query
@@ -633,7 +633,7 @@ static STimeWindow getActiveTimeWindow(SResultRowInfo * pResultRowInfo, int64_t
/*
* query border check, skey should not be bounded by the query time range, since the value skey will
- * be used as the time window index value. So we only change ekey of time window accordingly.
+ * be used as the time window idx value. So we only change ekey of time window accordingly.
*/
if (w.ekey > pQueryAttr->window.ekey && QUERY_IS_ASC_QUERY(pQueryAttr)) {
w.ekey = pQueryAttr->window.ekey;
@@ -945,6 +945,10 @@ void doInvokeUdf(SUdfInfo* pUdfInfo, SQLFunctionCtx *pCtx, int32_t idx, int32_t
static void doApplyFunctions(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, STimeWindow* pWin, int32_t offset,
int32_t forwardStep, TSKEY* tsCol, int32_t numOfTotal, int32_t numOfOutput) {
SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr;
+ int16_t minRowIndex = -1, maxRowIndex = -1;
+ bool updateIndex = false;
+ int32_t minMaxRowColIndex = -1;
+ int16_t minMaxRowType = FUNC_NOT_VAL;
for (int32_t k = 0; k < numOfOutput; ++k) {
bool hasAggregates = pCtx[k].preAggVals.isSet;
@@ -977,7 +981,39 @@ static void doApplyFunctions(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx
SUdfInfo* pUdfInfo = pRuntimeEnv->pUdfInfo;
doInvokeUdf(pUdfInfo, &pCtx[k], 0, TSDB_UDF_FUNC_NORMAL);
} else if (!TSDB_FUNC_IS_SCALAR(functionId)){
+ if (functionId == TSDB_FUNC_MIN_ROW || functionId == TSDB_FUNC_MAX_ROW) {
+ if (minMaxRowColIndex == -1) {
+ minMaxRowColIndex = k;
+ }
+
+ if (functionId == TSDB_FUNC_MIN_ROW) {
+ minMaxRowType = FUNC_MIN_ROW;
+ } else {
+ minMaxRowType = FUNC_MAX_ROW;
+ }
+
+ pCtx[k].updateIndex = false;
+ } else {
+ pCtx[k].minRowIndex = minRowIndex;
+ pCtx[k].maxRowIndex = maxRowIndex;
+ pCtx[k].updateIndex = updateIndex;
+ pCtx[k].minMaxRowType = minMaxRowType;
+ }
+
aAggs[functionId].xFunction(&pCtx[k]);
+
+ if (functionId == TSDB_FUNC_MIN_ROW || functionId == TSDB_FUNC_MAX_ROW) {
+ updateIndex = pCtx[k].updateIndex;
+
+ // find the minIndex or maxIndex of this column to determine the index of other columns
+ if (functionId == TSDB_FUNC_MIN_ROW) {
+ minRowIndex = pCtx[k].preAggVals.statis.minIndex;
+ }
+
+ if (functionId == TSDB_FUNC_MAX_ROW) {
+ maxRowIndex = pCtx[k].preAggVals.statis.maxIndex;
+ }
+ }
} else {
assert(0);
}
@@ -992,6 +1028,58 @@ static void doApplyFunctions(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx
pCtx[k].preAggVals.isSet = hasAggregates;
pCtx[k].pInput = start;
}
+
+ // update the indices of columns before the one in min_row/max_row
+ if (updateIndex) {
+ for (int32_t k = 0; k < minMaxRowColIndex; ++k) {
+ bool hasAggregates = pCtx[k].preAggVals.isSet;
+
+ pCtx[k].size = forwardStep;
+ pCtx[k].startTs = pWin->skey;
+ pCtx[k].endTs = pWin->ekey;
+
+ // keep it temporarily
+ char* start = pCtx[k].pInput;
+
+ int32_t pos = (QUERY_IS_ASC_QUERY(pQueryAttr)) ? offset : offset - (forwardStep - 1);
+ if (pCtx[k].pInput != NULL) {
+ pCtx[k].pInput = (char *)pCtx[k].pInput + pos * pCtx[k].inputBytes;
+ }
+
+ if (tsCol != NULL) {
+ pCtx[k].ptsList = &tsCol[pos];
+ }
+
+ // not a whole block involved in query processing, statistics data cannot be used
+ // NOTE: the original value of isSet has been changed here
+ if (pCtx[k].preAggVals.isSet && forwardStep < numOfTotal) {
+ pCtx[k].preAggVals.isSet = false;
+ }
+
+ if (functionNeedToExecute(pRuntimeEnv, &pCtx[k])) {
+ int32_t functionId = pCtx[k].functionId;
+ if (functionId != TSDB_FUNC_COL_DUMMY) {
+ continue;
+ }
+
+ pCtx[k].minRowIndex = minRowIndex;
+ pCtx[k].maxRowIndex = maxRowIndex;
+ pCtx[k].updateIndex = updateIndex;
+ pCtx[k].minMaxRowType = minMaxRowType;
+
+ aAggs[functionId].xFunction(&pCtx[k]);
+
+ pCtx[k].minRowIndex = -1;
+ pCtx[k].maxRowIndex = -1;
+ pCtx[k].updateIndex = false;
+ pCtx[k].minMaxRowType = FUNC_NOT_VAL;
+ }
+
+ // restore it
+ pCtx[k].preAggVals.isSet = hasAggregates;
+ pCtx[k].pInput = start;
+ }
+ }
}
@@ -1233,6 +1321,10 @@ static void doSetInputDataBlock(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx,
static void doAggregateImpl(SOperatorInfo* pOperator, TSKEY startTs, SQLFunctionCtx* pCtx, SSDataBlock* pSDataBlock) {
SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv;
+ int16_t minRowIndex = -1, maxRowIndex = -1;
+ bool updateIndex = false;
+ int32_t minMaxRowColIndex = -1;
+ int16_t minMaxRowType = FUNC_NOT_VAL;
for (int32_t k = 0; k < pOperator->numOfOutput; ++k) {
if (functionNeedToExecute(pRuntimeEnv, &pCtx[k])) {
@@ -1243,7 +1335,39 @@ static void doAggregateImpl(SOperatorInfo* pOperator, TSKEY startTs, SQLFunction
SUdfInfo* pUdfInfo = pRuntimeEnv->pUdfInfo;
doInvokeUdf(pUdfInfo, &pCtx[k], 0, TSDB_UDF_FUNC_NORMAL);
} else if (!TSDB_FUNC_IS_SCALAR(functionId)){
+ if (functionId == TSDB_FUNC_MIN_ROW || functionId == TSDB_FUNC_MAX_ROW) {
+ if (minMaxRowColIndex == -1) {
+ minMaxRowColIndex = k;
+ }
+
+ if (functionId == TSDB_FUNC_MIN_ROW) {
+ minMaxRowType = FUNC_MIN_ROW;
+ } else {
+ minMaxRowType = FUNC_MAX_ROW;
+ }
+
+ pCtx[k].updateIndex = false;
+ } else {
+ pCtx[k].minRowIndex = minRowIndex;
+ pCtx[k].maxRowIndex = maxRowIndex;
+ pCtx[k].updateIndex = updateIndex;
+ pCtx[k].minMaxRowType = minMaxRowType;
+ }
+
aAggs[functionId].xFunction(&pCtx[k]);
+
+ if (functionId == TSDB_FUNC_MIN_ROW || functionId == TSDB_FUNC_MAX_ROW) {
+ updateIndex = pCtx[k].updateIndex;
+
+ // find the minIndex or maxIndex of this column to determine the index of other columns
+ if (functionId == TSDB_FUNC_MIN_ROW) {
+ minRowIndex = pCtx[k].preAggVals.statis.minIndex;
+ }
+
+ if (functionId == TSDB_FUNC_MAX_ROW) {
+ maxRowIndex = pCtx[k].preAggVals.statis.maxIndex;
+ }
+ }
} else {
assert(0);
}
@@ -1254,6 +1378,32 @@ static void doAggregateImpl(SOperatorInfo* pOperator, TSKEY startTs, SQLFunction
}
}
}
+
+ // update the indices of columns before the one in min_row/max_row
+ if (updateIndex) {
+ for (int32_t k = 0; k < minMaxRowColIndex; ++k) {
+ if (functionNeedToExecute(pRuntimeEnv, &pCtx[k])) {
+ pCtx[k].startTs = startTs;
+
+ int32_t functionId = pCtx[k].functionId;
+ if (functionId != TSDB_FUNC_COL_DUMMY) {
+ continue;
+ }
+
+ pCtx[k].minRowIndex = minRowIndex;
+ pCtx[k].maxRowIndex = maxRowIndex;
+ pCtx[k].updateIndex = updateIndex;
+ pCtx[k].minMaxRowType = minMaxRowType;
+
+ aAggs[functionId].xFunction(&pCtx[k]);
+
+ pCtx[k].minRowIndex = -1;
+ pCtx[k].maxRowIndex = -1;
+ pCtx[k].updateIndex = false;
+ pCtx[k].minMaxRowType = FUNC_NOT_VAL;
+ }
+ }
+ }
}
static void projectApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t numOfOutput) {
@@ -1293,8 +1443,8 @@ void doTimeWindowInterpolation(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo,
}
SColIndex * pColIndex = &pExpr[k].base.colInfo;
- int16_t index = pColIndex->colIndex;
- SColumnInfoData *pColInfo = taosArrayGet(pDataBlock, index);
+ int16_t idx = pColIndex->colIndex;
+ SColumnInfoData *pColInfo = taosArrayGet(pDataBlock, idx);
assert(pColInfo->info.colId <= TSDB_RES_COL_ID || (pColInfo->info.colId >= 0 && pColInfo->info.colId == pColIndex->colId));
double v1 = 0, v2 = 0, v = 0;
@@ -1302,7 +1452,7 @@ void doTimeWindowInterpolation(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo,
if (functionId == TSDB_FUNC_INTERP) {
if (type == RESULT_ROW_START_INTERP) {
if (prevRowIndex == -1) {
- COPY_DATA(&pCtx[k].start.val, (char *)pRuntimeEnv->prevRow[index]);
+ COPY_DATA(&pCtx[k].start.val, (char *)pRuntimeEnv->prevRow[idx]);
} else {
COPY_DATA(&pCtx[k].start.val, (char *)pColInfo->pData + prevRowIndex * pColInfo->info.bytes);
}
@@ -1311,7 +1461,7 @@ void doTimeWindowInterpolation(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo,
if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) {
if (prevRowIndex == -1) {
- pCtx[k].start.ptr = (char *)pRuntimeEnv->prevRow[index];
+ pCtx[k].start.ptr = (char *)pRuntimeEnv->prevRow[idx];
} else {
pCtx[k].start.ptr = (char *)pColInfo->pData + prevRowIndex * pColInfo->info.bytes;
}
@@ -1319,7 +1469,7 @@ void doTimeWindowInterpolation(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo,
} else {
if (curRowIndex == -1) {
- COPY_DATA(&pCtx[k].end.val, pRuntimeEnv->prevRow[index]);
+ COPY_DATA(&pCtx[k].end.val, pRuntimeEnv->prevRow[idx]);
} else {
COPY_DATA(&pCtx[k].end.val, (char *)pColInfo->pData + curRowIndex * pColInfo->info.bytes);
}
@@ -1334,7 +1484,7 @@ void doTimeWindowInterpolation(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo,
assert(curTs != windowKey);
if (prevRowIndex == -1) {
- GET_TYPED_DATA(v1, double, pColInfo->info.type, (char *)pRuntimeEnv->prevRow[index]);
+ GET_TYPED_DATA(v1, double, pColInfo->info.type, (char *)pRuntimeEnv->prevRow[idx]);
} else {
GET_TYPED_DATA(v1, double, pColInfo->info.type, (char *)pColInfo->pData + prevRowIndex * pColInfo->info.bytes);
}
@@ -1949,7 +2099,7 @@ static int32_t setCtxTagColumnInfo(SQLFunctionCtx *pCtx, int32_t numOfOutput) {
continue;
}
- if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) { //ts_select ts,top(col,2)
+ if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY || functionId == TSDB_FUNC_COL_DUMMY) { //ts_select ts,top(col,2)
tagLen += pCtx[i].outputBytes;
pTagCtx[num++] = &pCtx[i];
} else if ((aAggs[functionId].status & TSDB_FUNCSTATE_SELECTIVITY) != 0) {
@@ -2024,6 +2174,9 @@ static SQLFunctionCtx* createSQLFunctionCtx(SQueryRuntimeEnv* pRuntimeEnv, SExpr
pCtx->end.key = INT64_MIN;
pCtx->startTs = INT64_MIN;
+ pCtx->minRowIndex = -1;
+ pCtx->maxRowIndex = -1;
+
pCtx->qWindow = pQueryAttr->window;
pCtx->allocRows = numOfRows;
@@ -3504,7 +3657,7 @@ void setTagValue(SOperatorInfo* pOperatorInfo, void *pTable, SQLFunctionCtx* pCt
continue;
}
- // todo use tag column index to optimize performance
+ // todo use tag column idx to optimize performance
GET_JSON_KEY(pLocalExprInfo)
doSetTagValueInParam(pTable, param, paramLen, pLocalExprInfo->base.colInfo.colId, &pCtx[idx].tag, pLocalExprInfo->base.resType,
pLocalExprInfo->base.resBytes);
@@ -3522,7 +3675,7 @@ void setTagValue(SOperatorInfo* pOperatorInfo, void *pTable, SQLFunctionCtx* pCt
offset += pLocalExprInfo->base.resBytes;
}
- //todo : use index to avoid iterator all possible output columns
+ //todo : use idx to avoid iterating over all possible output columns
if (pQueryAttr->stableQuery && pQueryAttr->stabledev && (pRuntimeEnv->prevResult != NULL)) {
setParamForStableStddev(pRuntimeEnv, pCtx, numOfOutput, pExprInfo);
}
@@ -4834,10 +4987,10 @@ void queryCostStatis(SQInfo *pQInfo) {
// TSKEY key = pTableQueryInfo->win.skey;
//
// pWindowResInfo->prevSKey = tw.skey;
-// int32_t index = pRuntimeEnv->resultRowInfo.curIndex;
+// int32_t idx = pRuntimeEnv->resultRowInfo.curIndex;
//
// int32_t numOfRes = tableApplyFunctionsOnBlock(pRuntimeEnv, pBlockInfo, NULL, binarySearchForKey, pDataBlock);
-// pRuntimeEnv->resultRowInfo.curIndex = index; // restore the window index
+// pRuntimeEnv->resultRowInfo.curIndex = idx; // restore the window idx
//
// qDebug("QInfo:0x%"PRIx64" check data block, brange:%" PRId64 "-%" PRId64 ", numOfRows:%d, numOfRes:%d, lastKey:%" PRId64,
// GET_QID(pRuntimeEnv), pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows, numOfRes,
@@ -5643,15 +5796,15 @@ SArray* getOrderCheckColumns(SQueryAttr* pQuery) {
{
numOfCols = (int32_t) taosArrayGetSize(pOrderColumns);
for(int32_t i = 0; i < numOfCols; ++i) {
- SColIndex* index = taosArrayGet(pOrderColumns, i);
+ SColIndex* idx = taosArrayGet(pOrderColumns, i);
for(int32_t j = 0; j < pQuery->numOfOutput; ++j) {
SSqlExpr* pExpr = &pQuery->pExpr1[j].base;
int32_t functionId = pExpr->functionId;
- if (index->colId == pExpr->colInfo.colId &&
+ if (idx->colId == pExpr->colInfo.colId &&
(functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TS)) {
- index->colIndex = j;
- index->colId = pExpr->resColId;
+ idx->colIndex = j;
+ idx->colId = pExpr->resColId;
}
}
}
@@ -5675,24 +5828,24 @@ SArray* getResultGroupCheckColumns(SQueryAttr* pQuery) {
}
for (int32_t i = 0; i < numOfCols; ++i) {
- SColIndex* index = taosArrayGet(pOrderColumns, i);
+ SColIndex* idx = taosArrayGet(pOrderColumns, i);
bool found = false;
for(int32_t j = 0; j < pQuery->numOfOutput; ++j) {
SSqlExpr* pExpr = &pQuery->pExpr1[j].base;
// TSDB_FUNC_TAG_DUMMY function needs to be ignored
- if (index->colId == pExpr->colInfo.colId &&
+ if (idx->colId == pExpr->colInfo.colId &&
((TSDB_COL_IS_TAG(pExpr->colInfo.flag) && ((pExpr->functionId == TSDB_FUNC_TAG) || (pExpr->functionId == TSDB_FUNC_TAGPRJ))) ||
(TSDB_COL_IS_NORMAL_COL(pExpr->colInfo.flag) && pExpr->functionId == TSDB_FUNC_PRJ))) {
- index->colIndex = j;
- index->colId = pExpr->resColId;
+ idx->colIndex = j;
+ idx->colId = pExpr->resColId;
found = true;
break;
}
}
- assert(found && index->colIndex >= 0 && index->colIndex < pQuery->numOfOutput);
+ assert(found && idx->colIndex >= 0 && idx->colIndex < pQuery->numOfOutput);
}
return pOrderColumns;
@@ -5774,8 +5927,8 @@ SOperatorInfo* createGlobalAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv,
for(int32_t i = 0; i < numOfCols; ++i) {
pInfo->prevRow[i] = (char*)pInfo->prevRow + offset;
- SColIndex* index = taosArrayGet(pInfo->orderColumnList, i);
- offset += pExpr[index->colIndex].base.resBytes;
+ SColIndex* idx = taosArrayGet(pInfo->orderColumnList, i);
+ offset += pExpr[idx->colIndex].base.resBytes;
}
numOfCols = (pInfo->groupColumnList != NULL)? (int32_t)taosArrayGetSize(pInfo->groupColumnList):0;
@@ -5789,8 +5942,8 @@ SOperatorInfo* createGlobalAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv,
for(int32_t i = 0; i < numOfCols; ++i) {
pInfo->currentGroupColData[i] = (char*)pInfo->currentGroupColData + offset;
- SColIndex* index = taosArrayGet(pInfo->groupColumnList, i);
- offset += pExpr[index->colIndex].base.resBytes;
+ SColIndex* idx = taosArrayGet(pInfo->groupColumnList, i);
+ offset += pExpr[idx->colIndex].base.resBytes;
}
initResultRowInfo(&pInfo->binfo.resultRowInfo, 8, TSDB_DATA_TYPE_INT);
@@ -5857,8 +6010,8 @@ SOperatorInfo *createMultiwaySortOperatorInfo(SQueryRuntimeEnv *pRuntimeEnv, SEx
for(int32_t i = 0; i < numOfCols; ++i) {
pInfo->prevRow[i] = (char*)pInfo->prevRow + offset;
- SColIndex* index = taosArrayGet(pInfo->orderColumnList, i);
- offset += pExpr[index->colIndex].base.colBytes;
+ SColIndex* idx = taosArrayGet(pInfo->orderColumnList, i);
+ offset += pExpr[idx->colIndex].base.colBytes;
}
}
@@ -6496,7 +6649,7 @@ static bool doEveryInterpolation(SOperatorInfo* pOperatorInfo, SSDataBlock* pBlo
STimeEveryOperatorInfo* pEveryInfo = (STimeEveryOperatorInfo*)pOperatorInfo->info;
SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr;
bool ascQuery = QUERY_IS_ASC_QUERY(pQueryAttr);
- int32_t gidx = pRuntimeEnv->current->groupIndex;
+ int32_t gindex = pRuntimeEnv->current->groupIndex;
SQLFunctionCtx* pCtx = NULL;
*needApply = false;
@@ -6702,7 +6855,7 @@ static bool doEveryInterpolation(SOperatorInfo* pOperatorInfo, SSDataBlock* pBlo
group_finished_exit:
- qDebug("group idx[%d] interp finished", gidx);
+ qDebug("group index[%d] interp finished", gindex);
if (pQueryAttr->needReverseScan) {
pQueryAttr->range.skey = INT64_MIN;
@@ -8132,8 +8285,8 @@ SOperatorInfo* createSLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperator
for(int32_t i = 0; i < numOfCols; ++i) {
pInfo->prevRow[i] = (char*)pInfo->prevRow + offset;
- SColIndex* index = taosArrayGet(pInfo->orderColumnList, i);
- offset += pExpr[index->colIndex].base.resBytes;
+ SColIndex* idx = taosArrayGet(pInfo->orderColumnList, i);
+ offset += pExpr[idx->colIndex].base.resBytes;
}
pInfo->pRes = createOutputBuf(pExpr, numOfOutput, pRuntimeEnv->resultInfo.capacity);
@@ -8888,7 +9041,7 @@ int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) {
pMsg += tListLen(param->pGroupColIndex[i].name);
}
- //pQueryMsg->orderByIdx = htons(pQueryMsg->orderByIdx);
+ //pQueryMsg->orderByIndex = htons(pQueryMsg->orderByIndex);
pQueryMsg->groupOrderType = htons(pQueryMsg->groupOrderType);
}
@@ -9441,11 +9594,11 @@ int32_t createIndirectQueryFuncExprFromMsg(SQueryTableMsg* pQueryMsg, int32_t nu
pExprs[i].base.resType = pExprs[i].pExpr->resultType;
pExprs[i].base.interBytes = 0;
} else {
- int32_t index = pExprs[i].base.colInfo.colIndex;
- assert(prevExpr[index].base.resColId == pExprs[i].base.colInfo.colId);
+ int32_t idx = pExprs[i].base.colInfo.colIndex;
+ assert(prevExpr[idx].base.resColId == pExprs[i].base.colInfo.colId);
- type = prevExpr[index].base.resType;
- bytes = prevExpr[index].base.resBytes;
+ type = prevExpr[idx].base.resType;
+ bytes = prevExpr[idx].base.resBytes;
int32_t param = (int32_t)pExprs[i].base.param[0].i64;
if (getResultDataInfo(type, bytes, pExprs[i].base.functionId, param, &pExprs[i].base.resType,
@@ -9476,7 +9629,7 @@ SGroupbyExpr *createGroupbyExprFromMsg(SQueryTableMsg *pQueryMsg, SColIndex *pCo
pGroupbyExpr->numOfGroupCols = pQueryMsg->numOfGroupCols;
pGroupbyExpr->orderType = pQueryMsg->groupOrderType;
- //pGroupbyExpr->orderIndex = pQueryMsg->orderByIdx;
+ //pGroupbyExpr->orderIndex = pQueryMsg->orderByIndex;
pGroupbyExpr->columnInfo = taosArrayInit(pQueryMsg->numOfGroupCols, sizeof(SColIndex));
for(int32_t i = 0; i < pQueryMsg->numOfGroupCols; ++i) {
@@ -9786,7 +9939,7 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SGroupbyExpr* pGroupbyExpr, S
SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv;
STimeWindow window = pQueryAttr->window;
- int32_t index = 0;
+ int32_t idx = 0;
for(int32_t i = 0; i < numOfGroups; ++i) {
SArray* pa = taosArrayGetP(pQueryAttr->tableGroupInfo.pGroupList, i);
@@ -9802,7 +9955,7 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SGroupbyExpr* pGroupbyExpr, S
STableKeyInfo* info = taosArrayGet(pa, j);
window.skey = info->lastKey;
- void* buf = (char*) pQInfo->pBuf + index * sizeof(STableQueryInfo);
+ void* buf = (char*) pQInfo->pBuf + idx * sizeof(STableQueryInfo);
STableQueryInfo* item = createTableQueryInfo(pQueryAttr, info->pTable, pQueryAttr->groupbyColumn, window, buf);
if (item == NULL) {
goto _cleanup;
@@ -9813,7 +9966,7 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SGroupbyExpr* pGroupbyExpr, S
STableId* id = TSDB_TABLEID(info->pTable);
taosHashPut(pRuntimeEnv->tableqinfoGroupInfo.map, &id->tid, sizeof(id->tid), &item, POINTER_BYTES);
- index += 1;
+ idx += 1;
}
}
diff --git a/src/query/src/qExtbuffer.c b/src/query/src/qExtbuffer.c
index 4a4ae3ca422a5e65e74d1c566dcd4c99de4e83a2..e2c649e99c01d5c8b9f6497eddbaf126e644ce09 100644
--- a/src/query/src/qExtbuffer.c
+++ b/src/query/src/qExtbuffer.c
@@ -485,9 +485,9 @@ int32_t compare_a(tOrderDescriptor *pDescriptor, int32_t numOfRows1, int32_t s1,
int32_t compare_aRv(SSDataBlock* pBlock, SArray* colIndex, int32_t numOfCols, int32_t rowIndex, char** buffer, int32_t order) {
for (int32_t i = 0; i < numOfCols; ++i) {
SColIndex* pColIndex = taosArrayGet(colIndex, i);
- int32_t index = pColIndex->colIndex;
+ int32_t idx = pColIndex->colIndex;
- SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, index);
+ SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, idx);
assert(pColIndex->colId == pColInfo->info.colId);
char* data = pColInfo->pData + rowIndex * pColInfo->info.bytes;
@@ -1176,14 +1176,14 @@ void tColModelCompact(SColumnModel *pModel, tFilePage *inputBuffer, int32_t maxE
}
}
-SSchema1* getColumnModelSchema(SColumnModel *pColumnModel, int32_t index) {
- assert(pColumnModel != NULL && index >= 0 && index < pColumnModel->numOfCols);
- return &pColumnModel->pFields[index].field;
+SSchema1* getColumnModelSchema(SColumnModel *pColumnModel, int32_t idx) {
+ assert(pColumnModel != NULL && idx >= 0 && idx < pColumnModel->numOfCols);
+ return &pColumnModel->pFields[idx].field;
}
-int16_t getColumnModelOffset(SColumnModel *pColumnModel, int32_t index) {
- assert(pColumnModel != NULL && index >= 0 && index < pColumnModel->numOfCols);
- return pColumnModel->pFields[index].offset;
+int16_t getColumnModelOffset(SColumnModel *pColumnModel, int32_t idx) {
+ assert(pColumnModel != NULL && idx >= 0 && idx < pColumnModel->numOfCols);
+ return pColumnModel->pFields[idx].offset;
}
void tColModelErase(SColumnModel *pModel, tFilePage *inputBuffer, int32_t blockCapacity, int32_t s, int32_t e) {
@@ -1257,17 +1257,17 @@ void tOrderDescDestroy(tOrderDescriptor *pDesc) {
tfree(pDesc);
}
-void taoscQSort(void** pCols, SSchema* pSchema, int32_t numOfCols, int32_t numOfRows, int32_t index, __compar_fn_t compareFn) {
- assert(numOfRows > 0 && numOfCols > 0 && index >= 0 && index < numOfCols);
+void taoscQSort(void** pCols, SSchema* pSchema, int32_t numOfCols, int32_t numOfRows, int32_t idx, __compar_fn_t compareFn) {
+ assert(numOfRows > 0 && numOfCols > 0 && idx >= 0 && idx < numOfCols);
- int32_t bytes = pSchema[index].bytes;
+ int32_t bytes = pSchema[idx].bytes;
int32_t size = bytes + sizeof(int32_t);
char* buf = calloc(1, size * numOfRows);
for(int32_t i = 0; i < numOfRows; ++i) {
char* dest = buf + size * i;
- memcpy(dest, ((char*) pCols[index]) + bytes * i, bytes);
+ memcpy(dest, ((char*) pCols[idx]) + bytes * i, bytes);
*(int32_t*)(dest+bytes) = i;
}
@@ -1279,7 +1279,7 @@ void taoscQSort(void** pCols, SSchema* pSchema, int32_t numOfCols, int32_t numOf
for(int32_t i = 0; i < numOfCols; ++i) {
int32_t bytes1 = pSchema[i].bytes;
- if (i == index) {
+ if (i == idx) {
for(int32_t j = 0; j < numOfRows; ++j){
char* src = buf + (j * size);
char* dest = ((char*)pCols[i]) + (j * bytes1);
diff --git a/src/query/src/qFill.c b/src/query/src/qFill.c
index d83620c78fe0cc87a1fff61b6c58dff9852ecbec..1f4bbed831952ab8c1f9c822defc97149742e4c7 100644
--- a/src/query/src/qFill.c
+++ b/src/query/src/qFill.c
@@ -63,8 +63,8 @@ static void doFillOneRowResult(SFillInfo* pFillInfo, void** data, char** srcData
int32_t step = GET_FORWARD_DIRECTION_FACTOR(pFillInfo->order);
// set the primary timestamp column value
- int32_t index = pFillInfo->numOfCurrent;
- char* val = elePtrAt(data[0], TSDB_KEYSIZE, index);
+ int32_t idx = pFillInfo->numOfCurrent;
+ char* val = elePtrAt(data[0], TSDB_KEYSIZE, idx);
*(TSKEY*) val = pFillInfo->currentKey;
// set the other values
@@ -78,11 +78,11 @@ static void doFillOneRowResult(SFillInfo* pFillInfo, void** data, char** srcData
continue;
}
- char* output = elePtrAt(data[i], pCol->col.bytes, index);
+ char* output = elePtrAt(data[i], pCol->col.bytes, idx);
assignVal(output, p + pCol->col.offset, pCol->col.bytes, pCol->col.type);
}
} else { // no prev value yet, set the value for NULL
- setNullValueForRow(pFillInfo, data, pFillInfo->numOfCols, index);
+ setNullValueForRow(pFillInfo, data, pFillInfo->numOfCols, idx);
}
} else if (pFillInfo->type == TSDB_FILL_NEXT) {
char* p = next;
@@ -94,11 +94,11 @@ static void doFillOneRowResult(SFillInfo* pFillInfo, void** data, char** srcData
continue;
}
- char* output = elePtrAt(data[i], pCol->col.bytes, index);
+ char* output = elePtrAt(data[i], pCol->col.bytes, idx);
assignVal(output, p + pCol->col.offset, pCol->col.bytes, pCol->col.type);
}
} else { // no prev value yet, set the value for NULL
- setNullValueForRow(pFillInfo, data, pFillInfo->numOfCols, index);
+ setNullValueForRow(pFillInfo, data, pFillInfo->numOfCols, idx);
}
} else if (pFillInfo->type == TSDB_FILL_LINEAR) {
if (prev != NULL && !outOfBound) {
@@ -111,7 +111,7 @@ static void doFillOneRowResult(SFillInfo* pFillInfo, void** data, char** srcData
int16_t type = pCol->col.type;
int16_t bytes = pCol->col.bytes;
- char *val1 = elePtrAt(data[i], pCol->col.bytes, index);
+ char *val1 = elePtrAt(data[i], pCol->col.bytes, idx);
if (type == TSDB_DATA_TYPE_BINARY|| type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BOOL) {
setNull(val1, pCol->col.type, bytes);
continue;
@@ -128,7 +128,7 @@ static void doFillOneRowResult(SFillInfo* pFillInfo, void** data, char** srcData
taosGetLinearInterpolationVal(&point, type, &point1, &point2, type, &exceedMax, &exceedMin);
}
} else {
- setNullValueForRow(pFillInfo, data, pFillInfo->numOfCols, index);
+ setNullValueForRow(pFillInfo, data, pFillInfo->numOfCols, idx);
}
} else { // fill the default value */
for (int32_t i = 1; i < pFillInfo->numOfCols; ++i) {
@@ -137,12 +137,12 @@ static void doFillOneRowResult(SFillInfo* pFillInfo, void** data, char** srcData
continue;
}
- char* val1 = elePtrAt(data[i], pCol->col.bytes, index);
+ char* val1 = elePtrAt(data[i], pCol->col.bytes, idx);
assignVal(val1, (char*)&pCol->fillVal.i, pCol->col.bytes, pCol->col.type);
}
}
- setTagsValue(pFillInfo, data, index);
+ setTagsValue(pFillInfo, data, idx);
pFillInfo->currentKey = taosTimeAdd(pFillInfo->currentKey, pFillInfo->interval.sliding * step, pFillInfo->interval.slidingUnit, pFillInfo->precision);
pFillInfo->numOfCurrent++;
}
@@ -303,11 +303,11 @@ static int32_t setTagColumnInfo(SFillInfo* pFillInfo, int32_t numOfCols, int32_t
numOfTags += 1;
bool exists = false;
- int32_t index = -1;
+ int32_t idx = -1;
for (int32_t j = 0; j < k; ++j) {
if (pFillInfo->pTags[j].col.colId == pColInfo->col.colId) {
exists = true;
- index = j;
+ idx = j;
break;
}
}
@@ -323,7 +323,7 @@ static int32_t setTagColumnInfo(SFillInfo* pFillInfo, int32_t numOfCols, int32_t
k += 1;
} else {
- pColInfo->tagIndex = index;
+ pColInfo->tagIndex = idx;
}
}
diff --git a/src/query/src/qFilter.c b/src/query/src/qFilter.c
index 11ace4f7cee127865304f0a5d04f07801e449f74..0d9fdb814a5af9df0901cf0c6d61e2a12554cfea 100644
--- a/src/query/src/qFilter.c
+++ b/src/query/src/qFilter.c
@@ -975,7 +975,7 @@ int32_t filterAddUnitToGroup(SFilterGroup *group, uint32_t unitIdx) {
return TSDB_CODE_SUCCESS;
}
-int32_t filterConvertSetFromBinary(void **q, const char *buf, int32_t len, uint32_t tType, bool tolower) {
+int32_t filterConvertSetFromBinary(void **q, const char *buf, int32_t len, uint32_t tType, bool bTolower) {
SBufferReader br = tbufInitReader(buf, len, false);
uint32_t sType = tbufReadUint32(&br);
SHashObj *pObj = taosHashInit(256, taosGetDefaultHashFunction(tType), true, false);
@@ -1158,7 +1158,7 @@ int32_t filterConvertSetFromBinary(void **q, const char *buf, int32_t len, uint3
t = varDataLen(tmp);
pvar = varDataVal(tmp);
- if (tolower) {
+ if (bTolower) {
strntolower_s(pvar, pvar, (int32_t)t);
}
break;
@@ -2746,7 +2746,7 @@ int32_t filterRmUnitByRange(SFilterInfo *info, SDataStatis *pDataStatis, int32_t
memset(info->blkUnitRes, 0, sizeof(*info->blkUnitRes) * info->unitNum);
for (uint32_t k = 0; k < info->unitNum; ++k) {
- int32_t index = -1;
+ int32_t idx = -1;
SFilterComUnit *cunit = &info->cunits[k];
if (FILTER_NO_MERGE_DATA_TYPE(cunit->dataType)) {
@@ -2755,16 +2755,16 @@ int32_t filterRmUnitByRange(SFilterInfo *info, SDataStatis *pDataStatis, int32_t
for(int32_t i = 0; i < numOfCols; ++i) {
if (pDataStatis[i].colId == cunit->colId) {
- index = i;
+ idx = i;
break;
}
}
- if (index == -1) {
+ if (idx == -1) {
continue;
}
- if (pDataStatis[index].numOfNull <= 0) {
+ if (pDataStatis[idx].numOfNull <= 0) {
if (cunit->optr == TSDB_RELATION_ISNULL) {
info->blkUnitRes[k] = -1;
rmUnit = 1;
@@ -2777,7 +2777,7 @@ int32_t filterRmUnitByRange(SFilterInfo *info, SDataStatis *pDataStatis, int32_t
continue;
}
} else {
- if (pDataStatis[index].numOfNull == numOfRows) {
+ if (pDataStatis[idx].numOfNull == numOfRows) {
if (cunit->optr == TSDB_RELATION_ISNULL) {
info->blkUnitRes[k] = 1;
rmUnit = 1;
@@ -2796,7 +2796,7 @@ int32_t filterRmUnitByRange(SFilterInfo *info, SDataStatis *pDataStatis, int32_t
continue;
}
- SDataStatis* pDataBlockst = &pDataStatis[index];
+ SDataStatis* pDataBlockst = &pDataStatis[idx];
void *minVal, *maxVal;
float minv = 0;
float maxv = 0;
@@ -3586,17 +3586,17 @@ bool filterRangeExecute(SFilterInfo *info, SDataStatis *pDataStatis, int32_t num
void *minVal, *maxVal;
for (uint32_t k = 0; k < info->colRangeNum; ++k) {
- int32_t index = -1;
+ int32_t idx = -1;
SFilterRangeCtx *ctx = info->colRange[k];
for(int32_t i = 0; i < numOfCols; ++i) {
if (pDataStatis[i].colId == ctx->colId) {
- index = i;
+ idx = i;
break;
}
}
// no statistics data, load the true data block
- if (index == -1) {
+ if (idx == -1) {
break;
}
@@ -3605,13 +3605,13 @@ bool filterRangeExecute(SFilterInfo *info, SDataStatis *pDataStatis, int32_t num
break;
}
- if (pDataStatis[index].numOfNull <= 0) {
+ if (pDataStatis[idx].numOfNull <= 0) {
if (ctx->isnull && !ctx->notnull && !ctx->isrange) {
ret = false;
break;
}
- } else if (pDataStatis[index].numOfNull > 0) {
- if (pDataStatis[index].numOfNull == numOfRows) {
+ } else if (pDataStatis[idx].numOfNull > 0) {
+ if (pDataStatis[idx].numOfNull == numOfRows) {
if ((ctx->notnull || ctx->isrange) && (!ctx->isnull)) {
ret = false;
break;
@@ -3625,7 +3625,7 @@ bool filterRangeExecute(SFilterInfo *info, SDataStatis *pDataStatis, int32_t num
}
}
- SDataStatis* pDataBlockst = &pDataStatis[index];
+ SDataStatis* pDataBlockst = &pDataStatis[idx];
SFilterRangeNode *r = ctx->rs;
float minv = 0;
diff --git a/src/query/src/qHistogram.c b/src/query/src/qHistogram.c
index 8544224a647c0497677814ef448498bbf73fab04..752c7b96a5594ea49a2e11332d332ffa4e4aab37 100644
--- a/src/query/src/qHistogram.c
+++ b/src/query/src/qHistogram.c
@@ -45,15 +45,15 @@
//}
//
////min heap
-// void tHeapAdjust(SHeapEntry* pEntry, int32_t index, int32_t len) {
+// void tHeapAdjust(SHeapEntry* pEntry, int32_t idx, int32_t len) {
// SHeapEntry* ptr = NULL;
//
// int32_t end = len - 1;
//
-// SHeapEntry p1 = pEntry[index];
-// int32_t next = index;
+// SHeapEntry p1 = pEntry[idx];
+// int32_t next = idx;
//
-// for(int32_t i=index; i<=(end-1)/2; ) {
+// for(int32_t i=idx; i<=(end-1)/2; ) {
// int32_t lc = (i<<1) + 1;
// int32_t rc = (i+1) << 1;
//
@@ -119,7 +119,7 @@
// }
//}
-static int32_t histogramCreateBin(SHistogramInfo* pHisto, int32_t index, double val);
+static int32_t histogramCreateBin(SHistogramInfo* pHisto, int32_t idx, double val);
SHistogramInfo* tHistogramCreate(int32_t numOfEntries) {
/* need one redundant slot */
@@ -191,7 +191,7 @@ int32_t tHistogramAdd(SHistogramInfo** pHisto, double val) {
tSkipListNode* pResNode = SSkipListPut((*pHisto)->pList, entry, &key, 0);
SHistBin* pEntry1 = (SHistBin*)pResNode->pData;
- pEntry1->index = -1;
+ pEntry1->idx = -1;
tSkipListNode* pLast = NULL;
@@ -209,7 +209,7 @@ int32_t tHistogramAdd(SHistogramInfo** pHisto, double val) {
SLoserTreeInfo* pTree = (*pHisto)->pLoserTree;
(*pHisto)->pLoserTree->pNode[lastIndex + pTree->numOfEntries].pData = pResNode;
- pEntry1->index = (*pHisto)->pLoserTree->pNode[lastIndex + pTree->numOfEntries].index;
+ pEntry1->idx = (*pHisto)->pLoserTree->pNode[lastIndex + pTree->numOfEntries].index;
// update the loser tree
if ((*pHisto)->ordered) {
@@ -390,39 +390,39 @@ static void histogramMergeImpl(SHistBin* pHistBin, int32_t* size) {
int32_t oldSize = *size;
double delta = DBL_MAX;
- int32_t index = -1;
+ int32_t idx = -1;
for (int32_t i = 1; i < oldSize; ++i) {
double d = pHistBin[i].val - pHistBin[i - 1].val;
if (d < delta) {
delta = d;
- index = i - 1;
+ idx = i - 1;
}
}
- SHistBin* s1 = &pHistBin[index];
- SHistBin* s2 = &pHistBin[index + 1];
+ SHistBin* s1 = &pHistBin[idx];
+ SHistBin* s2 = &pHistBin[idx + 1];
double newVal = (s1->val * s1->num + s2->val * s2->num) / (s1->num + s2->num);
s1->val = newVal;
s1->num = s1->num + s2->num;
- memmove(&pHistBin[index + 1], &pHistBin[index + 2], (oldSize - index - 2) * sizeof(SHistBin));
+ memmove(&pHistBin[idx + 1], &pHistBin[idx + 2], (oldSize - idx - 2) * sizeof(SHistBin));
(*size) -= 1;
#endif
}
/* optimize this procedure */
-int32_t histogramCreateBin(SHistogramInfo* pHisto, int32_t index, double val) {
+int32_t histogramCreateBin(SHistogramInfo* pHisto, int32_t idx, double val) {
#if defined(USE_ARRAYLIST)
- int32_t remain = pHisto->numOfEntries - index;
+ int32_t remain = pHisto->numOfEntries - idx;
if (remain > 0) {
- memmove(&pHisto->elems[index + 1], &pHisto->elems[index], sizeof(SHistBin) * remain);
+ memmove(&pHisto->elems[idx + 1], &pHisto->elems[idx], sizeof(SHistBin) * remain);
}
- assert(index >= 0 && index <= pHisto->maxEntries);
+ assert(idx >= 0 && idx <= pHisto->maxEntries);
- pHisto->elems[index].num = 1;
- pHisto->elems[index].val = val;
+ pHisto->elems[idx].num = 1;
+ pHisto->elems[idx].val = val;
pHisto->numOfEntries += 1;
/* we need to merge the slot */
diff --git a/src/query/src/qPercentile.c b/src/query/src/qPercentile.c
index 8428c339f4e89d6a5e988448f3aadadf522102b1..0210888b178c2b73c913cf28a45fa9eb11d8ad12 100644
--- a/src/query/src/qPercentile.c
+++ b/src/query/src/qPercentile.c
@@ -122,54 +122,54 @@ int32_t tBucketIntHash(tMemBucket *pBucket, const void *value) {
int64_t v = 0;
GET_TYPED_DATA(v, int64_t, pBucket->type, value);
- int32_t index = -1;
+ int32_t idx = -1;
if (v > pBucket->range.i64MaxVal || v < pBucket->range.i64MinVal) {
- return index;
+ return idx;
}
// divide the value range into 1024 buckets
uint64_t span = pBucket->range.i64MaxVal - pBucket->range.i64MinVal;
if (span < pBucket->numOfSlots) {
int64_t delta = v - pBucket->range.i64MinVal;
- index = (delta % pBucket->numOfSlots);
+ idx = (delta % pBucket->numOfSlots);
} else {
double slotSpan = (double)span / pBucket->numOfSlots;
- index = (int32_t)(((double)v - pBucket->range.i64MinVal) / slotSpan);
- if (index == pBucket->numOfSlots) {
- index -= 1;
+ idx = (int32_t)(((double)v - pBucket->range.i64MinVal) / slotSpan);
+ if (idx == pBucket->numOfSlots) {
+ idx -= 1;
}
}
- assert(index >= 0 && index < pBucket->numOfSlots);
- return index;
+ assert(idx >= 0 && idx < pBucket->numOfSlots);
+ return idx;
}
int32_t tBucketUintHash(tMemBucket *pBucket, const void *value) {
uint64_t v = 0;
GET_TYPED_DATA(v, uint64_t, pBucket->type, value);
- int32_t index = -1;
+ int32_t idx = -1;
if (v > pBucket->range.u64MaxVal || v < pBucket->range.u64MinVal) {
- return index;
+ return idx;
}
// divide the value range into 1024 buckets
uint64_t span = pBucket->range.u64MaxVal - pBucket->range.u64MinVal;
if (span < pBucket->numOfSlots) {
int64_t delta = v - pBucket->range.u64MinVal;
- index = (int32_t) (delta % pBucket->numOfSlots);
+ idx = (int32_t) (delta % pBucket->numOfSlots);
} else {
double slotSpan = (double)span / pBucket->numOfSlots;
- index = (int32_t)(((double)v - pBucket->range.u64MinVal) / slotSpan);
- if (index == pBucket->numOfSlots) {
- index -= 1;
+ idx = (int32_t)(((double)v - pBucket->range.u64MinVal) / slotSpan);
+ if (idx == pBucket->numOfSlots) {
+ idx -= 1;
}
}
- assert(index >= 0 && index < pBucket->numOfSlots);
- return index;
+ assert(idx >= 0 && idx < pBucket->numOfSlots);
+ return idx;
}
int32_t tBucketDoubleHash(tMemBucket *pBucket, const void *value) {
@@ -180,27 +180,27 @@ int32_t tBucketDoubleHash(tMemBucket *pBucket, const void *value) {
v = GET_DOUBLE_VAL(value);
}
- int32_t index = -1;
+ int32_t idx = -1;
if (v > pBucket->range.dMaxVal || v < pBucket->range.dMinVal) {
- return index;
+ return idx;
}
// divide a range of [dMinVal, dMaxVal] into 1024 buckets
double span = pBucket->range.dMaxVal - pBucket->range.dMinVal;
if (span < pBucket->numOfSlots) {
int32_t delta = (int32_t)(v - pBucket->range.dMinVal);
- index = (delta % pBucket->numOfSlots);
+ idx = (delta % pBucket->numOfSlots);
} else {
double slotSpan = span / pBucket->numOfSlots;
- index = (int32_t)((v - pBucket->range.dMinVal) / slotSpan);
- if (index == pBucket->numOfSlots) {
- index -= 1;
+ idx = (int32_t)((v - pBucket->range.dMinVal) / slotSpan);
+ if (idx == pBucket->numOfSlots) {
+ idx -= 1;
}
}
- assert(index >= 0 && index < pBucket->numOfSlots);
- return index;
+ assert(idx >= 0 && idx < pBucket->numOfSlots);
+ return idx;
}
static __perc_hash_func_t getHashFunc(int32_t type) {
@@ -332,18 +332,18 @@ int32_t tMemBucketPut(tMemBucket *pBucket, const void *data, size_t size) {
for (int32_t i = 0; i < size; ++i) {
char *d = (char *) data + i * bytes;
- int32_t index = (pBucket->hashFunc)(pBucket, d);
- if (index < 0) {
+ int32_t idx = (pBucket->hashFunc)(pBucket, d);
+ if (idx < 0) {
continue;
}
count += 1;
- tMemBucketSlot *pSlot = &pBucket->pSlots[index];
+ tMemBucketSlot *pSlot = &pBucket->pSlots[idx];
tMemBucketUpdateBoundingBox(&pSlot->range, d, pBucket->type);
// ensure available memory pages to allocate
- int32_t groupId = getGroupId(pBucket->numOfSlots, index, pBucket->times);
+ int32_t groupId = getGroupId(pBucket->numOfSlots, idx, pBucket->times);
int32_t pageId = -1;
if (pSlot->info.data == NULL || pSlot->info.data->num >= pBucket->elemPerPage) {
@@ -387,7 +387,7 @@ static MinMaxEntry getMinMaxEntryOfNextSlotWithData(tMemBucket *pMemBucket, int3
return pMemBucket->pSlots[j].range;
}
-static bool isIdenticalData(tMemBucket *pMemBucket, int32_t index);
+static bool isIdenticalData(tMemBucket *pMemBucket, int32_t idx);
static double getIdenticalDataVal(tMemBucket* pMemBucket, int32_t slotIndex) {
assert(isIdenticalData(pMemBucket, slotIndex));
@@ -532,8 +532,8 @@ double getPercentile(tMemBucket *pMemBucket, double percent) {
/*
* check if data in one slot are all identical only need to compare with the bounding box
*/
-bool isIdenticalData(tMemBucket *pMemBucket, int32_t index) {
- tMemBucketSlot *pSeg = &pMemBucket->pSlots[index];
+bool isIdenticalData(tMemBucket *pMemBucket, int32_t idx) {
+ tMemBucketSlot *pSeg = &pMemBucket->pSlots[idx];
if (IS_FLOAT_TYPE(pMemBucket->type)) {
return fabs(pSeg->range.dMaxVal - pSeg->range.dMinVal) < DBL_EPSILON;
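The three tBucket*Hash variants changed above all share one slot-index computation; only the value type differs. A hedged, type-free sketch of that computation (slot count and range handling follow the hunks above, but the function name and signature are illustrative):

#include <assert.h>
#include <stdint.h>

/* map a value into one of numOfSlots buckets over [minVal, maxVal];
 * returns -1 for out-of-range values, mirroring the hash functions above */
static int32_t bucketIndexSketch(double v, double minVal, double maxVal, int32_t numOfSlots) {
  if (v < minVal || v > maxVal) return -1;

  double  span = maxVal - minVal;
  int32_t idx;

  if (span < numOfSlots) {
    /* narrow range: spread values by their integer offset from the minimum */
    idx = (int32_t)(v - minVal) % numOfSlots;
  } else {
    double slotSpan = span / numOfSlots;
    idx = (int32_t)((v - minVal) / slotSpan);
    if (idx == numOfSlots) idx -= 1;   /* v == maxVal lands on the last slot */
  }

  assert(idx >= 0 && idx < numOfSlots);
  return idx;
}
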
diff --git a/src/query/src/qPlan.c b/src/query/src/qPlan.c
index 95c7f81ed68d0ef8f303ee45deda89e347d163d9..eda920063f503d1795f4a5cf86cd925c67501140 100644
--- a/src/query/src/qPlan.c
+++ b/src/query/src/qPlan.c
@@ -126,9 +126,9 @@ static SQueryNode* doAddTableColumnNode(SQueryInfo* pQueryInfo, STableMetaInfo*
for (int32_t i = 0; i < numOfCols; ++i) {
SColumn* pCol = taosArrayGetP(tableCols, i);
- SColumnIndex index = {.tableIndex = 0, .columnIndex = pCol->columnIndex};
- STableMetaInfo* pTableMetaInfo1 = tscGetMetaInfo(pQueryInfo, index.tableIndex);
- SExprInfo* p = tscExprCreate(pTableMetaInfo1, TSDB_FUNC_PRJ, &index, pCol->info.type, pCol->info.bytes,
+ SColumnIndex idx = {.tableIndex = 0, .columnIndex = pCol->columnIndex};
+ STableMetaInfo* pTableMetaInfo1 = tscGetMetaInfo(pQueryInfo, idx.tableIndex);
+ SExprInfo* p = tscExprCreate(pTableMetaInfo1, TSDB_FUNC_PRJ, &idx, pCol->info.type, pCol->info.bytes,
pCol->info.colId, 0, TSDB_COL_NORMAL);
strncpy(p->base.aliasName, pSchema[pCol->columnIndex].name, tListLen(p->base.aliasName));
diff --git a/src/query/src/qSqlParser.c b/src/query/src/qSqlParser.c
index cee5130651c6c8ff9e2db321592dc73503454da3..fe459ee460bbbcc0072f647b88f9a9ef51117a2a 100644
--- a/src/query/src/qSqlParser.c
+++ b/src/query/src/qSqlParser.c
@@ -857,8 +857,8 @@ SArray *tVariantListAppend(SArray *pList, tVariant *pVar, uint8_t sortOrder) {
return pList;
}
-SArray *tVariantListInsert(SArray *pList, tVariant *pVar, uint8_t sortOrder, int32_t index) {
- if (pList == NULL || pVar == NULL || index >= taosArrayGetSize(pList)) {
+SArray *tVariantListInsert(SArray *pList, tVariant *pVar, uint8_t sortOrder, int32_t idx) {
+ if (pList == NULL || pVar == NULL || idx >= taosArrayGetSize(pList)) {
return tVariantListAppend(NULL, pVar, sortOrder);
}
@@ -867,7 +867,7 @@ SArray *tVariantListInsert(SArray *pList, tVariant *pVar, uint8_t sortOrder, int
item.pVar = *pVar;
item.sortOrder = sortOrder;
- taosArrayInsert(pList, index, &item);
+ taosArrayInsert(pList, idx, &item);
return pList;
}
@@ -878,7 +878,8 @@ SRelationInfo *setTableNameList(SRelationInfo* pRelationInfo, SStrToken *pName,
}
pRelationInfo->type = SQL_NODE_FROM_TABLELIST;
- SRelElementPair p = {.tableName = *pName};
+ SRelElementPair p;
+ p.tableName = *pName;
if (pAlias != NULL) {
p.aliasName = *pAlias;
} else {
@@ -917,7 +918,8 @@ SRelationInfo* addSubqueryElem(SRelationInfo* pRelationInfo, SArray* pSub, SStrT
pRelationInfo->type = SQL_NODE_FROM_SUBQUERY;
- SRelElementPair p = {.pSubquery = pSub};
+ SRelElementPair p;
+ p.pSubquery = pSub;
if (pAlias != NULL) {
p.aliasName = *pAlias;
} else {
@@ -1181,6 +1183,10 @@ void destroySqlNode(SSqlNode *pSqlNode) {
pSqlNode->fillType = NULL;
tSqlExprDestroy(pSqlNode->pHaving);
+
+ tSqlExprDestroy(pSqlNode->pRange.start);
+ tSqlExprDestroy(pSqlNode->pRange.end);
+
free(pSqlNode);
}
diff --git a/src/query/src/qUtil.c b/src/query/src/qUtil.c
index 1628c2d5111268111ca88cbe511095e8334453ed..60ae900eb75d14e1d84100898154c593a00c1020 100644
--- a/src/query/src/qUtil.c
+++ b/src/query/src/qUtil.c
@@ -183,9 +183,9 @@ void clearResultRow(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResultRow, int16
}
// TODO refactor: use macro
-SResultRowCellInfo* getResultCell(const SResultRow* pRow, int32_t index, int32_t* offset) {
- assert(index >= 0 && offset != NULL);
- return (SResultRowCellInfo*)((char*) pRow->pCellInfo + offset[index]);
+SResultRowCellInfo* getResultCell(const SResultRow* pRow, int32_t idx, int32_t* offset) {
+ assert(idx >= 0 && offset != NULL);
+ return (SResultRowCellInfo*)((char*) pRow->pCellInfo + offset[idx]);
}
size_t getResultRowSize(SQueryRuntimeEnv* pRuntimeEnv) {
diff --git a/src/query/src/queryMain.c b/src/query/src/queryMain.c
index b2941a8fe00511902165403bd0e36f6cad8219af..ee3245dd575c230220fe71edb101388dcf8978a6 100644
--- a/src/query/src/queryMain.c
+++ b/src/query/src/queryMain.c
@@ -597,7 +597,7 @@ void** qReleaseQInfo(void* pMgmt, void* pQInfo, bool freeHandle) {
//kill by qid
int32_t qKillQueryByQId(void* pMgmt, int64_t qId, int32_t waitMs, int32_t waitCount) {
- int32_t error = TSDB_CODE_SUCCESS;
+ int32_t err = TSDB_CODE_SUCCESS;
void** handle = qAcquireQInfo(pMgmt, qId);
if(handle == NULL) return terrno;
@@ -613,13 +613,13 @@ int32_t qKillQueryByQId(void* pMgmt, int64_t qId, int32_t waitMs, int32_t waitCo
while (pQInfo->owner != 0) {
taosMsleep(waitMs);
if(loop++ > waitCount){
- error = TSDB_CODE_FAILED;
+ err = TSDB_CODE_FAILED;
break;
}
}
qReleaseQInfo(pMgmt, (void **)&handle, true);
- return error;
+ return err;
}
// local struct
diff --git a/src/rpc/src/rpcCache.c b/src/rpc/src/rpcCache.c
index 60a12c26b78626ed81cbc182d76c836c6ee74498..d18aa12c13ef30b62dc286008008a54f0de5778e 100644
--- a/src/rpc/src/rpcCache.c
+++ b/src/rpc/src/rpcCache.c
@@ -49,7 +49,7 @@ static int rpcHashConn(void *handle, char *fqdn, uint16_t port, int8_t connType
static void rpcLockCache(int64_t *lockedBy);
static void rpcUnlockCache(int64_t *lockedBy);
static void rpcCleanConnCache(void *handle, void *tmrId);
-static void rpcRemoveExpiredNodes(SConnCache *pCache, SConnHash *pNode, int hash, uint64_t time);
+static void rpcRemoveExpiredNodes(SConnCache *pCache, SConnHash *pNode, int hash, uint64_t timeStamp);
void *rpcOpenConnCache(int maxSessions, void (*cleanFp)(void *), void *tmrCtrl, int64_t keepTimer) {
SConnHash **connHashList;
@@ -118,7 +118,7 @@ void rpcAddConnIntoCache(void *handle, void *data, char *fqdn, uint16_t port, in
SConnHash * pNode;
SConnCache *pCache;
- uint64_t time = taosGetTimestampMs();
+ uint64_t timeStamp = taosGetTimestampMs();
pCache = (SConnCache *)handle;
assert(pCache);
@@ -131,7 +131,7 @@ void rpcAddConnIntoCache(void *handle, void *data, char *fqdn, uint16_t port, in
pNode->connType = connType;
pNode->data = data;
pNode->prev = NULL;
- pNode->time = time;
+ pNode->time = timeStamp;
rpcLockCache(pCache->lockedBy+hash);
@@ -140,7 +140,7 @@ void rpcAddConnIntoCache(void *handle, void *data, char *fqdn, uint16_t port, in
pCache->connHashList[hash] = pNode;
pCache->count[hash]++;
- rpcRemoveExpiredNodes(pCache, pNode->next, hash, time);
+ rpcRemoveExpiredNodes(pCache, pNode->next, hash, timeStamp);
rpcUnlockCache(pCache->lockedBy+hash);
@@ -159,15 +159,15 @@ void *rpcGetConnFromCache(void *handle, char *fqdn, uint16_t port, int8_t connTy
pCache = (SConnCache *)handle;
assert(pCache);
- uint64_t time = taosGetTimestampMs();
+ uint64_t timeStamp = taosGetTimestampMs();
hash = rpcHashConn(pCache, fqdn, port, connType);
rpcLockCache(pCache->lockedBy+hash);
pNode = pCache->connHashList[hash];
while (pNode) {
- if (time >= pCache->keepTimer + pNode->time) {
- rpcRemoveExpiredNodes(pCache, pNode, hash, time);
+ if (timeStamp >= pCache->keepTimer + pNode->time) {
+ rpcRemoveExpiredNodes(pCache, pNode, hash, timeStamp);
pNode = NULL;
break;
}
@@ -178,7 +178,7 @@ void *rpcGetConnFromCache(void *handle, char *fqdn, uint16_t port, int8_t connTy
}
if (pNode) {
- rpcRemoveExpiredNodes(pCache, pNode->next, hash, time);
+ rpcRemoveExpiredNodes(pCache, pNode->next, hash, timeStamp);
if (pNode->prev) {
pNode->prev->next = pNode->next;
@@ -217,12 +217,12 @@ static void rpcCleanConnCache(void *handle, void *tmrId) {
if (pCache->pTimer != tmrId) return;
pthread_mutex_lock(&pCache->mutex);
- uint64_t time = taosGetTimestampMs();
+ uint64_t timeStamp = taosGetTimestampMs();
for (hash = 0; hash < pCache->maxSessions; ++hash) {
rpcLockCache(pCache->lockedBy+hash);
pNode = pCache->connHashList[hash];
- rpcRemoveExpiredNodes(pCache, pNode, hash, time);
+ rpcRemoveExpiredNodes(pCache, pNode, hash, timeStamp);
rpcUnlockCache(pCache->lockedBy+hash);
}
@@ -231,8 +231,8 @@ static void rpcCleanConnCache(void *handle, void *tmrId) {
pthread_mutex_unlock(&pCache->mutex);
}
-static void rpcRemoveExpiredNodes(SConnCache *pCache, SConnHash *pNode, int hash, uint64_t time) {
- if (pNode == NULL || (time < pCache->keepTimer + pNode->time) ) return;
+static void rpcRemoveExpiredNodes(SConnCache *pCache, SConnHash *pNode, int hash, uint64_t timeStamp) {
+ if (pNode == NULL || (timeStamp < pCache->keepTimer + pNode->time) ) return;
SConnHash *pPrev = pNode->prev, *pNext;
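The time -> timeStamp rename in rpcCache.c leaves the expiry rule untouched: a cached connection is dropped once it has sat in the cache for keepTimer milliseconds. A tiny sketch of that predicate, with the node structure reduced to the single field the check needs (names assumed):

#include <stdbool.h>
#include <stdint.h>

/* simplified stand-in for SConnHash: only the timestamp matters here */
typedef struct { uint64_t time; /* ms when the connection was cached */ } CachedConnSketch;

/* true once the node has outlived keepTimer ms, matching rpcRemoveExpiredNodes above */
static bool connExpired(const CachedConnSketch *pNode, uint64_t keepTimer, uint64_t now) {
  return now >= keepTimer + pNode->time;
}
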
diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c
index 10b729eddb9a5c743670698bf48206952d201f67..da08c924a54e7c74cdacecfb278bf7d63564b65c 100644
--- a/src/rpc/src/rpcMain.c
+++ b/src/rpc/src/rpcMain.c
@@ -361,8 +361,8 @@ void *rpcMallocCont(int contLen) {
void rpcFreeCont(void *cont) {
if (cont) {
char *temp = ((char *)cont) - sizeof(SRpcHead) - sizeof(SRpcReqContext);
- free(temp);
tTrace("free mem: %p", temp);
+ free(temp);
}
}
@@ -573,8 +573,8 @@ void rpcCancelRequest(int64_t rid) {
static void rpcFreeMsg(void *msg) {
if ( msg ) {
char *temp = (char *)msg - sizeof(SRpcReqContext);
- free(temp);
tTrace("free mem: %p", temp);
+ free(temp);
}
}
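Both rpcMain.c hunks only swap two statements so the pointer is traced before free() rather than after it, keeping the log from reading freed memory. A minimal illustration of the corrected ordering (printf stands in for tTrace; the header rewinding of the real code is omitted):

#include <stdio.h>
#include <stdlib.h>

static void freeWithTrace(void *buf) {
  if (buf) {
    char *temp = (char *)buf;                 /* real code rewinds past the RPC headers here */
    printf("free mem: %p\n", (void *)temp);   /* log first ... */
    free(temp);                               /* ... then release */
  }
}
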
diff --git a/src/rpc/src/rpcTcp.c b/src/rpc/src/rpcTcp.c
index 001c50ee5dedae5cac9f3b87a8055b97b5af7984..aa13179af1b11487e96a4a6873a0c26774fc950a 100644
--- a/src/rpc/src/rpcTcp.c
+++ b/src/rpc/src/rpcTcp.c
@@ -392,9 +392,9 @@ void taosCleanUpTcpClient(void *chandle) {
void *taosOpenTcpClientConnection(void *shandle, void *thandle, uint32_t ip, uint16_t port) {
SClientObj * pClientObj = shandle;
- int32_t index = atomic_load_32(&pClientObj->index) % pClientObj->numOfThreads;
- atomic_store_32(&pClientObj->index, index + 1);
- SThreadObj *pThreadObj = pClientObj->pThreadObj[index];
+ int32_t idx = atomic_load_32(&pClientObj->index) % pClientObj->numOfThreads;
+ atomic_store_32(&pClientObj->index, idx + 1);
+ SThreadObj *pThreadObj = pClientObj->pThreadObj[idx];
SOCKET fd = taosOpenTcpClientSocket(ip, port, pThreadObj->ip);
#if defined(_TD_WINDOWS_64) || defined(_TD_WINDOWS_32)
@@ -403,12 +403,12 @@ void *taosOpenTcpClientConnection(void *shandle, void *thandle, uint32_t ip, uin
if (fd <= 0) return NULL;
#endif
- struct sockaddr_in sin;
+ struct sockaddr_in sockin;
uint16_t localPort = 0;
- unsigned int addrlen = sizeof(sin);
- if (getsockname(fd, (struct sockaddr *)&sin, &addrlen) == 0 &&
- sin.sin_family == AF_INET && addrlen == sizeof(sin)) {
- localPort = (uint16_t)ntohs(sin.sin_port);
+ unsigned int addrlen = sizeof(sockin);
+ if (getsockname(fd, (struct sockaddr *)&sockin, &addrlen) == 0 &&
+ sockin.sin_family == AF_INET && addrlen == sizeof(sockin)) {
+ localPort = (uint16_t)ntohs(sockin.sin_port);
}
SFdObj *pFdObj = taosMallocFdObj(pThreadObj, fd);
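The sin -> sockin rename above (and again in rpcUdp.c below) is cosmetic; the pattern it wraps is reading back the kernel-assigned local port of a freshly opened socket. A self-contained POSIX sketch of that pattern, with error handling trimmed:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>

/* return the local port of a connected IPv4 socket, or 0 on failure */
static uint16_t localPortOf(int fd) {
  struct sockaddr_in sockin;
  socklen_t addrlen = sizeof(sockin);

  memset(&sockin, 0, sizeof(sockin));
  if (getsockname(fd, (struct sockaddr *)&sockin, &addrlen) == 0 &&
      sockin.sin_family == AF_INET && addrlen == sizeof(sockin)) {
    return (uint16_t)ntohs(sockin.sin_port);
  }
  return 0;
}
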
diff --git a/src/rpc/src/rpcUdp.c b/src/rpc/src/rpcUdp.c
index 46313543d861ab1a2b56a236b0416cb373295bb7..7b0f27a3d63baf3479ade0579f7203b3774c370e 100644
--- a/src/rpc/src/rpcUdp.c
+++ b/src/rpc/src/rpcUdp.c
@@ -97,11 +97,11 @@ void *taosInitUdpConnection(uint32_t ip, uint16_t port, char *label, int threads
break;
}
- struct sockaddr_in sin;
- unsigned int addrlen = sizeof(sin);
- if (getsockname(pConn->fd, (struct sockaddr *)&sin, &addrlen) == 0 &&
- sin.sin_family == AF_INET && addrlen == sizeof(sin)) {
- pConn->localPort = (uint16_t)ntohs(sin.sin_port);
+ struct sockaddr_in sockin;
+ unsigned int addrlen = sizeof(sockin);
+ if (getsockname(pConn->fd, (struct sockaddr *)&sockin, &addrlen) == 0 &&
+ sockin.sin_family == AF_INET && addrlen == sizeof(sockin)) {
+ pConn->localPort = (uint16_t)ntohs(sockin.sin_port);
}
tstrncpy(pConn->label, label, sizeof(pConn->label));
diff --git a/src/rpc/test/rclient.c b/src/rpc/test/rclient.c
index 2f4433f1bb32e965de66a40d7d6ae36c6804a06c..42b77e624ed9bc599034b705f7557a79a9232380 100644
--- a/src/rpc/test/rclient.c
+++ b/src/rpc/test/rclient.c
@@ -70,7 +70,7 @@ static void *sendRequest(void *param) {
}
int main(int argc, char *argv[]) {
- SRpcInit rpcInit;
+ SRpcInit rpcInitial;
SRpcEpSet epSet;
int msgSize = 128;
int numOfReqs = 0;
@@ -90,18 +90,18 @@ int main(int argc, char *argv[]) {
strcpy(epSet.fqdn[1], "192.168.0.1");
// client info
- memset(&rpcInit, 0, sizeof(rpcInit));
- rpcInit.localPort = 0;
- rpcInit.label = "APP";
- rpcInit.numOfThreads = 1;
- rpcInit.cfp = processResponse;
- rpcInit.sessions = 100;
- rpcInit.idleTime = tsShellActivityTimer*1000;
- rpcInit.user = "michael";
- rpcInit.secret = secret;
- rpcInit.ckey = "key";
- rpcInit.spi = 1;
- rpcInit.connType = TAOS_CONN_CLIENT;
+ memset(&rpcInitial, 0, sizeof(rpcInitial));
+ rpcInitial.localPort = 0;
+ rpcInitial.label = "APP";
+ rpcInitial.numOfThreads = 1;
+ rpcInitial.cfp = processResponse;
+ rpcInitial.sessions = 100;
+ rpcInitial.idleTime = tsShellActivityTimer*1000;
+ rpcInitial.user = "michael";
+ rpcInitial.secret = secret;
+ rpcInitial.ckey = "key";
+ rpcInitial.spi = 1;
+ rpcInitial.connType = TAOS_CONN_CLIENT;
  for (int i=1; i<argc; ++i) {
diff --git a/src/sync/src/syncMain.c b/src/sync/src/syncMain.c
--- a/src/sync/src/syncMain.c
+++ b/src/sync/src/syncMain.c
-  for (int32_t index = 0; index < pCfg->replica; ++index) {
- const SNodeInfo *pNodeInfo = pCfg->nodeInfo + index;
- pNode->peerInfo[index] = syncAddPeer(pNode, pNodeInfo);
- if (pNode->peerInfo[index] == NULL) {
+ for (int32_t idx = 0; idx < pCfg->replica; ++idx) {
+ const SNodeInfo *pNodeInfo = pCfg->nodeInfo + idx;
+ pNode->peerInfo[idx] = syncAddPeer(pNode, pNodeInfo);
+ if (pNode->peerInfo[idx] == NULL) {
sError("vgId:%d, node:%d fqdn:%s port:%u is not configured, stop taosd", pNode->vgId, pNodeInfo->nodeId,
pNodeInfo->nodeFqdn, pNodeInfo->nodePort);
syncStop(pNode->rid);
@@ -210,7 +210,7 @@ int64_t syncStart(const SSyncInfo *pInfo) {
}
if ((strcmp(pNodeInfo->nodeFqdn, tsNodeFqdn) == 0) && (pNodeInfo->nodePort == tsSyncPort)) {
- pNode->selfIndex = index;
+ pNode->selfIndex = idx;
}
}
@@ -256,8 +256,8 @@ int64_t syncStart(const SSyncInfo *pInfo) {
}
syncStartCheckPeerConn(pNode->peerInfo[TAOS_SYNC_MAX_REPLICA]); // arb
- for (int32_t index = 0; index < pNode->replica; ++index) {
- syncStartCheckPeerConn(pNode->peerInfo[index]);
+ for (int32_t idx = 0; idx < pNode->replica; ++idx) {
+ syncStartCheckPeerConn(pNode->peerInfo[idx]);
}
return pNode->rid;
@@ -277,8 +277,8 @@ void syncStop(int64_t rid) {
if (pNode->pFwdTimer) taosTmrStop(pNode->pFwdTimer);
if (pNode->pRoleTimer) taosTmrStop(pNode->pRoleTimer);
- for (int32_t index = 0; index < pNode->replica; ++index) {
- pPeer = pNode->peerInfo[index];
+ for (int32_t idx = 0; idx < pNode->replica; ++idx) {
+ pPeer = pNode->peerInfo[idx];
if (pPeer) syncRemovePeer(pPeer);
}
@@ -303,8 +303,8 @@ int32_t syncReconfig(int64_t rid, const SSyncCfg *pNewCfg) {
pthread_mutex_lock(&pNode->mutex);
syncStopCheckPeerConn(pNode->peerInfo[TAOS_SYNC_MAX_REPLICA]); // arb
- for (int32_t index = 0; index < pNode->replica; ++index) {
- syncStopCheckPeerConn(pNode->peerInfo[index]);
+ for (int32_t idx = 0; idx < pNode->replica; ++idx) {
+ syncStopCheckPeerConn(pNode->peerInfo[idx]);
}
for (i = 0; i < pNode->replica; ++i) {
@@ -364,8 +364,8 @@ int32_t syncReconfig(int64_t rid, const SSyncCfg *pNewCfg) {
}
syncStartCheckPeerConn(pNode->peerInfo[TAOS_SYNC_MAX_REPLICA]); // arb
- for (int32_t index = 0; index < pNode->replica; ++index) {
- syncStartCheckPeerConn(pNode->peerInfo[index]);
+ for (int32_t idx = 0; idx < pNode->replica; ++idx) {
+ syncStartCheckPeerConn(pNode->peerInfo[idx]);
}
pthread_mutex_unlock(&pNode->mutex);
@@ -629,16 +629,16 @@ static SSyncPeer *syncAddPeer(SSyncNode *pNode, const SNodeInfo *pInfo) {
}
void syncBroadcastStatus(SSyncNode *pNode) {
- for (int32_t index = 0; index < pNode->replica; ++index) {
- if (index == pNode->selfIndex) continue;
- SSyncPeer *pPeer = pNode->peerInfo[index];
+ for (int32_t idx = 0; idx < pNode->replica; ++idx) {
+ if (idx == pNode->selfIndex) continue;
+ SSyncPeer *pPeer = pNode->peerInfo[idx];
syncSendPeersStatusMsgToPeer(pPeer, 1, SYNC_STATUS_BROADCAST, syncGenTranId());
}
}
static void syncResetFlowCtrl(SSyncNode *pNode) {
- for (int32_t index = 0; index < pNode->replica; ++index) {
- pNode->peerInfo[index]->numOfRetrieves = 0;
+ for (int32_t idx = 0; idx < pNode->replica; ++idx) {
+ pNode->peerInfo[idx]->numOfRetrieves = 0;
}
if (pNode->notifyFlowCtrlFp) {
@@ -649,7 +649,7 @@ static void syncResetFlowCtrl(SSyncNode *pNode) {
static void syncChooseMaster(SSyncNode *pNode) {
SSyncPeer *pPeer;
int32_t onlineNum = 0;
- int32_t index = -1;
+ int32_t idx = -1;
int32_t replica = pNode->replica;
for (int32_t i = 0; i < pNode->replica; ++i) {
@@ -660,13 +660,13 @@ static void syncChooseMaster(SSyncNode *pNode) {
if (onlineNum == pNode->replica) {
// if all peers are online, peer with highest version shall be master
- index = 0;
+ idx = 0;
for (int32_t i = 1; i < pNode->replica; ++i) {
- if (pNode->peerInfo[i]->version > pNode->peerInfo[index]->version) {
- index = i;
+ if (pNode->peerInfo[i]->version > pNode->peerInfo[idx]->version) {
+ idx = i;
}
}
- sDebug("vgId:%d, master:%s may be choosed, index:%d", pNode->vgId, pNode->peerInfo[index]->id, index);
+ sDebug("vgId:%d, master:%s may be choosed, index:%d", pNode->vgId, pNode->peerInfo[idx]->id, idx);
} else {
sDebug("vgId:%d, no master election since onlineNum:%d replica:%d", pNode->vgId, onlineNum, pNode->replica);
}
@@ -683,26 +683,26 @@ static void syncChooseMaster(SSyncNode *pNode) {
}
}
- if (index < 0 && onlineNum > replica / 2.0) {
+ if (idx < 0 && onlineNum > replica / 2.0) {
// over half of nodes are online
for (int32_t i = 0; i < pNode->replica; ++i) {
// slave with highest version shall be master
pPeer = pNode->peerInfo[i];
if (pPeer->role == TAOS_SYNC_ROLE_SLAVE || pPeer->role == TAOS_SYNC_ROLE_MASTER) {
- if (index < 0 || pPeer->version > pNode->peerInfo[index]->version) {
- index = i;
+ if (idx < 0 || pPeer->version > pNode->peerInfo[idx]->version) {
+ idx = i;
}
}
}
- if (index >= 0) {
+ if (idx >= 0) {
sDebug("vgId:%d, master:%s may be choosed, index:%d onlineNum(arb):%d replica:%d", pNode->vgId,
- pNode->peerInfo[index]->id, index, onlineNum, replica);
+ pNode->peerInfo[idx]->id, idx, onlineNum, replica);
}
}
- if (index >= 0) {
- if (index == pNode->selfIndex) {
+ if (idx >= 0) {
+ if (idx == pNode->selfIndex) {
sInfo("vgId:%d, start to work as master", pNode->vgId);
nodeRole = TAOS_SYNC_ROLE_MASTER;
@@ -712,7 +712,7 @@ static void syncChooseMaster(SSyncNode *pNode) {
syncResetFlowCtrl(pNode);
(*pNode->notifyRoleFp)(pNode->vgId, nodeRole);
} else {
- pPeer = pNode->peerInfo[index];
+ pPeer = pNode->peerInfo[idx];
sInfo("%s, it shall work as master", pPeer->id);
}
} else {
@@ -725,8 +725,8 @@ static SSyncPeer *syncCheckMaster(SSyncNode *pNode) {
int32_t masterIndex = -1;
int32_t replica = pNode->replica;
- for (int32_t index = 0; index < pNode->replica; ++index) {
- if (pNode->peerInfo[index]->role != TAOS_SYNC_ROLE_OFFLINE) {
+ for (int32_t idx = 0; idx < pNode->replica; ++idx) {
+ if (pNode->peerInfo[idx]->role != TAOS_SYNC_ROLE_OFFLINE) {
onlineNum++;
}
}
@@ -751,19 +751,19 @@ static SSyncPeer *syncCheckMaster(SSyncNode *pNode) {
(*pNode->notifyRoleFp)(pNode->vgId, nodeRole);
}
} else {
- for (int32_t index = 0; index < pNode->replica; ++index) {
- SSyncPeer *pTemp = pNode->peerInfo[index];
+ for (int32_t idx = 0; idx < pNode->replica; ++idx) {
+ SSyncPeer *pTemp = pNode->peerInfo[idx];
if (pTemp->role != TAOS_SYNC_ROLE_MASTER) continue;
if (masterIndex < 0) {
- masterIndex = index;
- sDebug("vgId:%d, peer:%s is master, index:%d", pNode->vgId, pTemp->id, index);
+ masterIndex = idx;
+ sDebug("vgId:%d, peer:%s is master, index:%d", pNode->vgId, pTemp->id, idx);
} else { // multiple masters, it shall not happen
if (masterIndex == pNode->selfIndex) {
sError("%s, peer is master, work as slave instead", pTemp->id);
nodeRole = TAOS_SYNC_ROLE_SLAVE;
(*pNode->notifyRoleFp)(pNode->vgId, nodeRole);
} else {
- sError("vgId:%d, peer:%s is master too, masterIndex:%d index:%d", pNode->vgId, pTemp->id, masterIndex, index);
+ sError("vgId:%d, peer:%s is master too, masterIndex:%d index:%d", pNode->vgId, pTemp->id, masterIndex, idx);
}
}
}
@@ -783,9 +783,9 @@ static int32_t syncValidateMaster(SSyncPeer *pPeer) {
(*pNode->notifyRoleFp)(pNode->vgId, nodeRole);
code = -1;
- for (int32_t index = 0; index < pNode->replica; ++index) {
- if (index == pNode->selfIndex) continue;
- syncRestartPeer(pNode->peerInfo[index]);
+ for (int32_t idx = 0; idx < pNode->replica; ++idx) {
+ if (idx == pNode->selfIndex) continue;
+ syncRestartPeer(pNode->peerInfo[idx]);
}
}
@@ -825,15 +825,15 @@ static void syncCheckRole(SSyncPeer *pPeer, SPeerStatus* peersStatus, int8_t new
} else {
// master not there, if all peer's state and version are consistent, choose the master
int32_t consistent = 0;
- int32_t index = 0;
+ int32_t idx = 0;
if (peersStatus != NULL) {
- for (index = 0; index < pNode->replica; ++index) {
- SSyncPeer *pTemp = pNode->peerInfo[index];
- if (pTemp->role != peersStatus[index].role) break;
- if ((pTemp->role != TAOS_SYNC_ROLE_OFFLINE) && (pTemp->version != peersStatus[index].version)) break;
+ for (idx = 0; idx < pNode->replica; ++idx) {
+ SSyncPeer *pTemp = pNode->peerInfo[idx];
+ if (pTemp->role != peersStatus[idx].role) break;
+ if ((pTemp->role != TAOS_SYNC_ROLE_OFFLINE) && (pTemp->version != peersStatus[idx].version)) break;
}
- if (index >= pNode->replica) consistent = 1;
+ if (idx >= pNode->replica) consistent = 1;
} else {
if (pNode->replica == 2) consistent = 1;
}
@@ -1331,7 +1331,7 @@ static void syncProcessBrokenLink(int64_t rid, int32_t closedByApp) {
static int32_t syncSaveFwdInfo(SSyncNode *pNode, uint64_t _version, void *mhandle) {
SSyncFwds *pSyncFwds = pNode->pSyncFwds;
- int64_t time = taosGetTimestampMs();
+ int64_t lastTime = taosGetTimestampMs();
if (pSyncFwds->fwds >= SYNC_MAX_FWDS) {
// pSyncFwds->first = (pSyncFwds->first + 1) % SYNC_MAX_FWDS;
@@ -1348,7 +1348,7 @@ static int32_t syncSaveFwdInfo(SSyncNode *pNode, uint64_t _version, void *mhandl
memset(pFwdInfo, 0, sizeof(SFwdInfo));
pFwdInfo->version = _version;
pFwdInfo->mhandle = mhandle;
- pFwdInfo->time = time;
+ pFwdInfo->time = lastTime;
pSyncFwds->fwds++;
sTrace("vgId:%d, fwd info is saved, hver:%" PRIu64 " fwds:%d ", pNode->vgId, _version, pSyncFwds->fwds);
@@ -1400,10 +1400,10 @@ static void syncMonitorNodeRole(void *param, void *tmrId) {
SSyncNode *pNode = syncAcquireNode(rid);
if (pNode == NULL) return;
- for (int32_t index = 0; index < pNode->replica; index++) {
- if (index == pNode->selfIndex) continue;
+ for (int32_t idx = 0; idx < pNode->replica; idx++) {
+ if (idx == pNode->selfIndex) continue;
- SSyncPeer *pPeer = pNode->peerInfo[index];
+ SSyncPeer *pPeer = pNode->peerInfo[idx];
if (/*pPeer->role > TAOS_SYNC_ROLE_UNSYNCED && */ nodeRole > TAOS_SYNC_ROLE_UNSYNCED) continue;
if (/*pPeer->sstatus > TAOS_SYNC_STATUS_INIT || */ nodeSStatus > TAOS_SYNC_STATUS_INIT) continue;
@@ -1425,16 +1425,16 @@ static void syncMonitorFwdInfos(void *param, void *tmrId) {
SSyncFwds *pSyncFwds = pNode->pSyncFwds;
if (pSyncFwds) {
- int64_t time = taosGetTimestampMs();
+ int64_t lastTime = taosGetTimestampMs();
if (pSyncFwds->fwds > 0) {
pthread_mutex_lock(&pNode->mutex);
for (int32_t i = 0; i < pSyncFwds->fwds; ++i) {
SFwdInfo *pFwdInfo = pSyncFwds->fwdInfo + (pSyncFwds->first + i) % SYNC_MAX_FWDS;
- if (ABS(time - pFwdInfo->time) < 10000) break;
+ if (ABS(lastTime - pFwdInfo->time) < 10000) break;
sDebug("vgId:%d, forward info expired, hver:%" PRIu64 " curtime:%" PRIu64 " savetime:%" PRIu64, pNode->vgId,
- pFwdInfo->version, time, pFwdInfo->time);
+ pFwdInfo->version, lastTime, pFwdInfo->time);
syncProcessFwdAck(pNode, pFwdInfo, TSDB_CODE_SYN_CONFIRM_EXPIRED);
}
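Across the syncMain.c hunks the only change is the index -> idx rename; the election rule itself is unchanged. A hedged sketch of that rule with peers reduced to role and version (enum values and names are illustrative, not the project's):

#include <stdint.h>

enum { ROLE_OFFLINE = 0, ROLE_UNSYNCED, ROLE_SLAVE, ROLE_MASTER };

typedef struct {
  int8_t   role;
  uint64_t version;
} PeerSketch;

/* pick the master candidate: if every peer is online, the highest version wins;
 * otherwise, with more than half online, the highest-version slave/master wins.
 * Returns the chosen peer index, or -1 when no election is possible. */
static int chooseMasterSketch(const PeerSketch *peers, int replica) {
  int onlineNum = 0;
  for (int i = 0; i < replica; ++i) {
    if (peers[i].role != ROLE_OFFLINE) onlineNum++;
  }

  int idx = -1;
  if (onlineNum == replica) {
    idx = 0;
    for (int i = 1; i < replica; ++i) {
      if (peers[i].version > peers[idx].version) idx = i;
    }
  } else if (onlineNum > replica / 2.0) {
    for (int i = 0; i < replica; ++i) {
      if (peers[i].role == ROLE_SLAVE || peers[i].role == ROLE_MASTER) {
        if (idx < 0 || peers[i].version > peers[idx].version) idx = i;
      }
    }
  }
  return idx;
}
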
diff --git a/src/sync/src/syncRetrieve.c b/src/sync/src/syncRetrieve.c
index 623d6e3cc0c20ef3c69b4ebfb6752616c1ff56b0..f0fcf6d6dd34e50a9810878aa7fbed2905a7f615 100644
--- a/src/sync/src/syncRetrieve.c
+++ b/src/sync/src/syncRetrieve.c
@@ -228,7 +228,7 @@ static int64_t syncRetrieveLastWal(SSyncPeer *pPeer, char *name, uint64_t fversi
return code;
}
-static int64_t syncProcessLastWal(SSyncPeer *pPeer, char *wname, int64_t index) {
+static int64_t syncProcessLastWal(SSyncPeer *pPeer, char *wname, int64_t idx) {
SSyncNode *pNode = pPeer->pSyncNode;
int32_t once = 0; // last WAL has once ever been processed
int64_t offset = 0;
@@ -290,12 +290,12 @@ static int64_t syncRetrieveWal(SSyncPeer *pPeer) {
char wname[TSDB_FILENAME_LEN * 2];
int32_t size;
int64_t code = -1;
- int64_t index = 0;
+ int64_t idx = 0;
while (1) {
// retrieve wal info
wname[0] = 0;
- code = (*pNode->getWalInfoFp)(pNode->vgId, wname, &index);
+ code = (*pNode->getWalInfoFp)(pNode->vgId, wname, &idx);
if (code < 0) {
sError("%s, failed to get wal info since:%s, code:0x%" PRIx64, pPeer->id, strerror(errno), code);
break;
@@ -308,7 +308,7 @@ static int64_t syncRetrieveWal(SSyncPeer *pPeer) {
}
if (code == 0) { // last wal
- code = syncProcessLastWal(pPeer, wname, index);
+ code = syncProcessLastWal(pPeer, wname, idx);
sInfo("%s, last wal processed, code:%" PRId64, pPeer->id, code);
break;
}
@@ -317,14 +317,14 @@ static int64_t syncRetrieveWal(SSyncPeer *pPeer) {
snprintf(fname, sizeof(fname), "%s/%s", pNode->path, wname);
// send wal file, old wal file won't be modified, even remove is ok
- struct stat fstat;
- if (stat(fname, &fstat) < 0) {
+ struct stat fstatus;
+ if (stat(fname, &fstatus) < 0) {
code = -1;
sInfo("%s, failed to stat wal:%s for retrieve since %s, code:0x%" PRIx64, pPeer->id, fname, strerror(errno), code);
break;
}
- size = fstat.st_size;
+ size = fstatus.st_size;
sInfo("%s, retrieve wal:%s size:%d", pPeer->id, fname, size);
int32_t sfd = open(fname, O_RDONLY | O_BINARY);
diff --git a/src/tsdb/src/tsdbCommit.c b/src/tsdb/src/tsdbCommit.c
index 6b19fdf3c1ffad31efeb06497c05fa2740636c01..3abc3e9acc6c8f6e909d4d6ef5f043dc2ee3e156 100644
--- a/src/tsdb/src/tsdbCommit.c
+++ b/src/tsdb/src/tsdbCommit.c
@@ -275,7 +275,7 @@ int tsdbWriteBlockIdx(SDFile *pHeadf, SArray *pIdxA, void **ppBuf) {
// =================== Commit Meta Data
-static int tsdbInitCommitMetaFile(STsdbRepo *pRepo, SMFile* pMf, bool open) {
+static int tsdbInitCommitMetaFile(STsdbRepo *pRepo, SMFile* pMf, bool bOpen) {
STsdbFS * pfs = REPO_FS(pRepo);
SMFile * pOMFile = pfs->cstatus->pmf;
SDiskID did;
@@ -287,7 +287,7 @@ static int tsdbInitCommitMetaFile(STsdbRepo *pRepo, SMFile* pMf, bool open) {
did.id = TFS_PRIMARY_ID;
tsdbInitMFile(pMf, did, REPO_ID(pRepo), FS_TXN_VERSION(REPO_FS(pRepo)));
- if (open && tsdbCreateMFile(pMf, true) < 0) {
+ if (bOpen && tsdbCreateMFile(pMf, true) < 0) {
tsdbError("vgId:%d failed to create META file since %s", REPO_ID(pRepo), tstrerror(terrno));
return -1;
}
@@ -295,7 +295,7 @@ static int tsdbInitCommitMetaFile(STsdbRepo *pRepo, SMFile* pMf, bool open) {
tsdbInfo("vgId:%d meta file %s is created to commit", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pMf));
} else {
tsdbInitMFileEx(pMf, pOMFile);
- if (open && tsdbOpenMFile(pMf, O_WRONLY) < 0) {
+ if (bOpen && tsdbOpenMFile(pMf, O_WRONLY) < 0) {
tsdbError("vgId:%d failed to open META file since %s", REPO_ID(pRepo), tstrerror(terrno));
return -1;
}
@@ -1813,4 +1813,4 @@ int tsdbCommitControl(STsdbRepo* pRepo, SControlDataInfo* pCtlDataInfo) {
tsem_post(&pRepo->readyToCommit);
return ret;
-}
\ No newline at end of file
+}
diff --git a/src/tsdb/src/tsdbFS.c b/src/tsdb/src/tsdbFS.c
index b3d52e8fad1c81c55003470d024fd48278384f8c..bfeb61e4f1ab3141a8d05cba1a84bac6f4c72669 100644
--- a/src/tsdb/src/tsdbFS.c
+++ b/src/tsdb/src/tsdbFS.c
@@ -1217,13 +1217,13 @@ static int tsdbRestoreDFileSet(STsdbRepo *pRepo) {
bool isOneFSetFinish = true;
int lastFType = -1;
// one fileset ends when (1) the array ends or (2) encounter different fid
- for (size_t index = 0; index < fArraySize; ++index) {
+ for (size_t idx = 0; idx < fArraySize; ++idx) {
int tvid = -1, tfid = -1;
TSDB_FILE_T ttype = TSDB_FILE_MAX;
uint32_t tversion = -1;
char bname[TSDB_FILENAME_LEN] = "\0";
- pf = taosArrayGet(fArray, index);
+ pf = taosArrayGet(fArray, idx);
tfsbasename(pf, bname);
tsdbParseDFilename(bname, &tvid, &tfid, &ttype, &tversion);
ASSERT(tvid == REPO_ID(pRepo));
@@ -1237,7 +1237,7 @@ static int tsdbRestoreDFileSet(STsdbRepo *pRepo) {
lastFType = ttype;
- if (index == 0) {
+ if (idx == 0) {
memset(&fset, 0, sizeof(SDFileSet));
TSDB_FSET_SET_CLOSED(&fset);
nDFiles = 1;
@@ -1249,7 +1249,7 @@ static int tsdbRestoreDFileSet(STsdbRepo *pRepo) {
++nDFiles;
pDFile->f = *pf;
// (1) the array ends
- if (index == fArraySize - 1) {
+ if (idx == fArraySize - 1) {
if (tsdbIsDFileSetValid(nDFiles)) {
tsdbInfo("vgId:%d DFileSet %d is fetched, nDFiles=%" PRIu8, REPO_ID(pRepo), fset.fid, nDFiles);
isOneFSetFinish = true;
diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c
index 2ae215ad36288349c41d18ad000823063da37805..63ea4ab6df0e84e81a9308de509ccd24c933c54e 100644
--- a/src/tsdb/src/tsdbMain.c
+++ b/src/tsdb/src/tsdbMain.c
@@ -128,7 +128,7 @@ int tsdbCloseRepo(STsdbRepo *repo, int toCommit) {
tsdbStopStream(pRepo);
if(pRepo->pthread){
- taosDestoryThread(pRepo->pthread);
+ taosDestroyThread(pRepo->pthread);
pRepo->pthread = NULL;
}
@@ -344,7 +344,7 @@ int32_t tsdbConfigRepo(STsdbRepo *repo, STsdbCfg *pCfg) {
#endif
}
-uint32_t tsdbGetFileInfo(STsdbRepo *repo, char *name, uint32_t *index, uint32_t eindex, int64_t *size) {
+uint32_t tsdbGetFileInfo(STsdbRepo *repo, char *name, uint32_t *idx, uint32_t eindex, int64_t *size) {
// TODO
return 0;
#if 0
@@ -356,16 +356,16 @@ uint32_t tsdbGetFileInfo(STsdbRepo *repo, char *name, uint32_t *index, uint32_t
struct stat fState;
- tsdbDebug("vgId:%d name:%s index:%d eindex:%d", pRepo->config.tsdbId, name, *index, eindex);
- ASSERT(*index <= eindex);
+ tsdbDebug("vgId:%d name:%s index:%d eindex:%d", pRepo->config.tsdbId, name, *idx, eindex);
+ ASSERT(*idx <= eindex);
if (name[0] == 0) { // get the file from index or after, but not larger than eindex
- int fid = (*index) / TSDB_FILE_TYPE_MAX;
+ int fid = (*idx) / TSDB_FILE_TYPE_MAX;
if (pFileH->nFGroups == 0 || fid > pFileH->pFGroup[pFileH->nFGroups - 1].fileId) {
- if (*index <= TSDB_META_FILE_INDEX && TSDB_META_FILE_INDEX <= eindex) {
+ if (*idx <= TSDB_META_FILE_INDEX && TSDB_META_FILE_INDEX <= eindex) {
fname = tsdbGetMetaFileName(pRepo->rootDir);
- *index = TSDB_META_FILE_INDEX;
+ *idx = TSDB_META_FILE_INDEX;
magic = TSDB_META_FILE_MAGIC(pRepo->tsdbMeta);
sprintf(name, "tsdb/%s", TSDB_META_FILE_NAME);
} else {
@@ -375,7 +375,7 @@ uint32_t tsdbGetFileInfo(STsdbRepo *repo, char *name, uint32_t *index, uint32_t
SFileGroup *pFGroup =
taosbsearch(&fid, pFileH->pFGroup, pFileH->nFGroups, sizeof(SFileGroup), keyFGroupCompFunc, TD_GE);
if (pFGroup->fileId == fid) {
- SFile *pFile = &pFGroup->files[(*index) % TSDB_FILE_TYPE_MAX];
+ SFile *pFile = &pFGroup->files[(*idx) % TSDB_FILE_TYPE_MAX];
fname = strdup(TSDB_FILE_NAME(pFile));
magic = pFile->info.magic;
char *tfname = strdup(fname);
@@ -385,7 +385,7 @@ uint32_t tsdbGetFileInfo(STsdbRepo *repo, char *name, uint32_t *index, uint32_t
if ((pFGroup->fileId + 1) * TSDB_FILE_TYPE_MAX - 1 < (int)eindex) {
SFile *pFile = &pFGroup->files[0];
fname = strdup(TSDB_FILE_NAME(pFile));
- *index = pFGroup->fileId * TSDB_FILE_TYPE_MAX;
+ *idx = pFGroup->fileId * TSDB_FILE_TYPE_MAX;
magic = pFile->info.magic;
char *tfname = strdup(fname);
sprintf(name, "tsdb/%s/%s", TSDB_DATA_DIR_NAME, basename(tfname));
@@ -402,7 +402,7 @@ uint32_t tsdbGetFileInfo(STsdbRepo *repo, char *name, uint32_t *index, uint32_t
tfree(fname);
return 0;
}
- if (*index == TSDB_META_FILE_INDEX) { // get meta file
+ if (*idx == TSDB_META_FILE_INDEX) { // get meta file
tsdbGetStoreInfo(fname, &magic, size);
} else {
char tfname[TSDB_FILENAME_LEN] = "\0";
diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c
index a095bff61e05822f6389a51671f98fc5a33e0bbe..a7aa310152a4bd445ba732b6ae7c8e671263afd4 100644
--- a/src/tsdb/src/tsdbMeta.c
+++ b/src/tsdb/src/tsdbMeta.c
@@ -28,13 +28,13 @@ static void tsdbRemoveTableFromMeta(STsdbRepo *pRepo, STable *pTable, bool rm
static int tsdbAddTableIntoIndex(STsdbMeta *pMeta, STable *pTable, bool refSuper);
static int tsdbRemoveTableFromIndex(STsdbMeta *pMeta, STable *pTable);
static int tsdbInitTableCfg(STableCfg *config, ETableType type, uint64_t uid, int32_t tid);
-static int tsdbTableSetSchema(STableCfg *config, STSchema *pSchema, bool dup);
-static int tsdbTableSetName(STableCfg *config, char *name, bool dup);
-static int tsdbTableSetTagSchema(STableCfg *config, STSchema *pSchema, bool dup);
-static int tsdbTableSetSName(STableCfg *config, char *sname, bool dup);
+static int tsdbTableSetSchema(STableCfg *config, STSchema *pSchema, bool duplicate);
+static int tsdbTableSetName(STableCfg *config, char *name, bool duplicate);
+static int tsdbTableSetTagSchema(STableCfg *config, STSchema *pSchema, bool duplicate);
+static int tsdbTableSetSName(STableCfg *config, char *sname, bool duplicate);
static int tsdbTableSetSuperUid(STableCfg *config, uint64_t uid);
-static int tsdbTableSetTagValue(STableCfg *config, SKVRow row, bool dup);
-static int tsdbTableSetStreamSql(STableCfg *config, char *sql, bool dup);
+static int tsdbTableSetTagValue(STableCfg *config, SKVRow row, bool duplicate);
+static int tsdbTableSetStreamSql(STableCfg *config, char *sql, bool duplicate);
static int tsdbEncodeTableName(void **buf, tstr *name);
static void * tsdbDecodeTableName(void *buf, tstr **name);
static int tsdbEncodeTable(void **buf, STable *pTable);
@@ -1236,8 +1236,8 @@ static int tsdbInitTableCfg(STableCfg *config, ETableType type, uint64_t uid, in
return 0;
}
-static int tsdbTableSetSchema(STableCfg *config, STSchema *pSchema, bool dup) {
- if (dup) {
+static int tsdbTableSetSchema(STableCfg *config, STSchema *pSchema, bool duplicate) {
+ if (duplicate) {
config->schema = tdDupSchema(pSchema);
if (config->schema == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
@@ -1249,8 +1249,8 @@ static int tsdbTableSetSchema(STableCfg *config, STSchema *pSchema, bool dup) {
return 0;
}
-static int tsdbTableSetName(STableCfg *config, char *name, bool dup) {
- if (dup) {
+static int tsdbTableSetName(STableCfg *config, char *name, bool duplicate) {
+ if (duplicate) {
config->name = strdup(name);
if (config->name == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
@@ -1263,13 +1263,13 @@ static int tsdbTableSetName(STableCfg *config, char *name, bool dup) {
return 0;
}
-static int tsdbTableSetTagSchema(STableCfg *config, STSchema *pSchema, bool dup) {
+static int tsdbTableSetTagSchema(STableCfg *config, STSchema *pSchema, bool duplicate) {
if (config->type != TSDB_CHILD_TABLE) {
terrno = TSDB_CODE_TDB_INVALID_CREATE_TB_MSG;
return -1;
}
- if (dup) {
+ if (duplicate) {
config->tagSchema = tdDupSchema(pSchema);
if (config->tagSchema == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
@@ -1281,13 +1281,13 @@ static int tsdbTableSetTagSchema(STableCfg *config, STSchema *pSchema, bool dup)
return 0;
}
-static int tsdbTableSetSName(STableCfg *config, char *sname, bool dup) {
+static int tsdbTableSetSName(STableCfg *config, char *sname, bool duplicate) {
if (config->type != TSDB_CHILD_TABLE) {
terrno = TSDB_CODE_TDB_INVALID_CREATE_TB_MSG;
return -1;
}
- if (dup) {
+ if (duplicate) {
config->sname = strdup(sname);
if (config->sname == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
@@ -1309,13 +1309,13 @@ static int tsdbTableSetSuperUid(STableCfg *config, uint64_t uid) {
return 0;
}
-static int tsdbTableSetTagValue(STableCfg *config, SKVRow row, bool dup) {
+static int tsdbTableSetTagValue(STableCfg *config, SKVRow row, bool duplicate) {
if (config->type != TSDB_CHILD_TABLE) {
terrno = TSDB_CODE_TDB_INVALID_CREATE_TB_MSG;
return -1;
}
- if (dup) {
+ if (duplicate) {
config->tagValues = tdKVRowDup(row);
if (config->tagValues == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
@@ -1328,13 +1328,13 @@ static int tsdbTableSetTagValue(STableCfg *config, SKVRow row, bool dup) {
return 0;
}
-static int tsdbTableSetStreamSql(STableCfg *config, char *sql, bool dup) {
+static int tsdbTableSetStreamSql(STableCfg *config, char *sql, bool duplicate) {
if (config->type != TSDB_STREAM_TABLE) {
terrno = TSDB_CODE_TDB_INVALID_CREATE_TB_MSG;
return -1;
}
- if (dup) {
+ if (duplicate) {
config->sql = strdup(sql);
if (config->sql == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c
index 567b7ab85b3e82defea5d26bc812553beb09ef6b..3d72a7bde3bb87eca567819503ceb9746968768b 100644
--- a/src/tsdb/src/tsdbRead.c
+++ b/src/tsdb/src/tsdbRead.c
@@ -986,7 +986,9 @@ static SMemRow getSMemRowInTableMem(STableCheckInfo* pCheckInfo, int32_t order,
return rmem;
} else {
pCheckInfo->chosen = CHECKINFO_CHOSEN_BOTH;
- *extraRow = rimem;
+ if (extraRow) {
+ *extraRow = rimem;
+ }
return rmem;
}
} else {
@@ -2589,10 +2591,10 @@ static int32_t createDataBlocksInfo(STsdbQueryHandle* pQueryHandle, int32_t numO
while (numOfTotal < cnt) {
int32_t pos = pTree->pNode[0].index;
- int32_t index = sup.blockIndexArray[pos]++;
+ int32_t idx = sup.blockIndexArray[pos]++;
STableBlockInfo* pBlocksInfo = sup.pDataBlockInfo[pos];
- pQueryHandle->pDataBlockInfo[numOfTotal++] = pBlocksInfo[index];
+ pQueryHandle->pDataBlockInfo[numOfTotal++] = pBlocksInfo[idx];
// set data block index overflow, in order to disable the offset comparator
if (sup.blockIndexArray[pos] >= sup.numOfBlocksPerTable[pos]) {
@@ -3331,7 +3333,7 @@ static bool loadDataBlockFromTableSeq(STsdbQueryHandle* pQueryHandle) {
size_t numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
assert(numOfTables > 0);
- int64_t stime = taosGetTimestampUs();
+ int64_t lastTime = taosGetTimestampUs();
while(pQueryHandle->activeIndex < numOfTables) {
if (loadBlockOfActiveTable(pQueryHandle)) {
@@ -3349,7 +3351,7 @@ static bool loadDataBlockFromTableSeq(STsdbQueryHandle* pQueryHandle) {
terrno = TSDB_CODE_SUCCESS;
- int64_t elapsedTime = taosGetTimestampUs() - stime;
+ int64_t elapsedTime = taosGetTimestampUs() - lastTime;
pQueryHandle->cost.checkForNextTime += elapsedTime;
}
@@ -3368,8 +3370,8 @@ bool tsdbNextDataBlock(TsdbQueryHandleT pHandle) {
return false;
}
- int64_t stime = taosGetTimestampUs();
- int64_t elapsedTime = stime;
+ int64_t lastTime = taosGetTimestampUs();
+ int64_t elapsedTime = lastTime;
// TODO refactor: remove "type"
if (pQueryHandle->type == TSDB_QUERY_TYPE_LAST) {
@@ -3396,7 +3398,7 @@ bool tsdbNextDataBlock(TsdbQueryHandleT pHandle) {
}
if (exists) {
- pQueryHandle->cost.checkForNextTime += (taosGetTimestampUs() - stime);
+ pQueryHandle->cost.checkForNextTime += (taosGetTimestampUs() - lastTime);
return exists;
}
@@ -3408,7 +3410,7 @@ bool tsdbNextDataBlock(TsdbQueryHandleT pHandle) {
bool ret = doHasDataInBuffer(pQueryHandle);
terrno = TSDB_CODE_SUCCESS;
- elapsedTime = taosGetTimestampUs() - stime;
+ elapsedTime = taosGetTimestampUs() - lastTime;
pQueryHandle->cost.checkForNextTime += elapsedTime;
return ret;
}
@@ -3757,7 +3759,7 @@ int32_t tsdbRetrieveDataBlockStatisInfo(TsdbQueryHandleT* pQueryHandle, SDataSta
return TSDB_CODE_SUCCESS;
}
- int64_t stime = taosGetTimestampUs();
+ int64_t lastTime = taosGetTimestampUs();
int statisStatus = tsdbLoadBlockStatis(&pHandle->rhelper, pBlockInfo->compBlock);
if (statisStatus < TSDB_STATIS_OK) {
return terrno;
@@ -3791,7 +3793,7 @@ int32_t tsdbRetrieveDataBlockStatisInfo(TsdbQueryHandleT* pQueryHandle, SDataSta
}
}
- int64_t elapsed = taosGetTimestampUs() - stime;
+ int64_t elapsed = taosGetTimestampUs() - lastTime;
pHandle->cost.statisInfoLoadTime += elapsed;
*pBlockStatis = pHandle->statis;
diff --git a/src/util/inc/tthread.h b/src/util/inc/tthread.h
index 7443ad706dcbef529d857fe823cddd0cc1efbdd3..9ef1c230359c154d54f7c577a3387cea0d57c551 100644
--- a/src/util/inc/tthread.h
+++ b/src/util/inc/tthread.h
@@ -26,7 +26,7 @@ extern "C" {
// create new thread
pthread_t* taosCreateThread( void *(*__start_routine) (void *), void* param);
// destory thread
-bool taosDestoryThread(pthread_t* pthread);
+bool taosDestroyThread(pthread_t* pthread);
// thread running return true
bool taosThreadRunning(pthread_t* pthread);
diff --git a/src/util/src/hash.c b/src/util/src/hash.c
index e2fd37fdc41479743d21e43f451c4fc4270b01d8..d4d42976155cb1e11b4abdf6c1d6fa6855921971 100644
--- a/src/util/src/hash.c
+++ b/src/util/src/hash.c
@@ -47,7 +47,7 @@ typedef struct SHashEntry {
SHashNode *next;
} SHashEntry;
-typedef struct SHashObj {
+struct SHashObj {
SHashEntry **hashList;
size_t capacity; // number of slots
size_t size; // number of elements in hash table
@@ -58,7 +58,7 @@ typedef struct SHashObj {
SHashLockTypeE type; // lock type
bool enableUpdate; // enable update
SArray *pMemBlock; // memory block allocated for SHashEntry
-} SHashObj;
+};
/*
* Function definition
@@ -303,7 +303,7 @@ int32_t taosHashGetSize(const SHashObj *pHashObj) {
if (pHashObj == NULL) {
return 0;
}
- return (int32_t)atomic_load_64(&pHashObj->size);
+ return (int32_t)atomic_load_64((int32_t *) &pHashObj->size);
}
static FORCE_INLINE bool taosHashTableEmpty(const SHashObj *pHashObj) {
diff --git a/src/util/src/tarray.c b/src/util/src/tarray.c
index 20f6d5b250264e61b4be87061370e3f48ed2a924..efccf7dff8f1730a29d5306c7ff1ff8d46ece341 100644
--- a/src/util/src/tarray.c
+++ b/src/util/src/tarray.c
@@ -180,15 +180,15 @@ void* taosArrayPop(SArray* pArray) {
return TARRAY_GET_ELEM(pArray, pArray->size);
}
-void* taosArrayGet(const SArray* pArray, size_t index) {
- assert(index < pArray->size);
- return TARRAY_GET_ELEM(pArray, index);
+void* taosArrayGet(const SArray* pArray, size_t idx) {
+ assert(idx < pArray->size);
+ return TARRAY_GET_ELEM(pArray, idx);
}
-void* taosArrayGetP(const SArray* pArray, size_t index) {
- assert(index < pArray->size);
+void* taosArrayGetP(const SArray* pArray, size_t idx) {
+ assert(idx < pArray->size);
- void* d = TARRAY_GET_ELEM(pArray, index);
+ void* d = TARRAY_GET_ELEM(pArray, idx);
return *(void**)d;
}
@@ -204,12 +204,12 @@ void taosArraySetSize(SArray* pArray, size_t size) {
pArray->size = size;
}
-void* taosArrayInsert(SArray* pArray, size_t index, void* pData) {
+void* taosArrayInsert(SArray* pArray, size_t idx, void* pData) {
if (pArray == NULL || pData == NULL) {
return NULL;
}
- if (index >= pArray->size) {
+ if (idx >= pArray->size) {
return taosArrayPush(pArray, pData);
}
@@ -221,9 +221,9 @@ void* taosArrayInsert(SArray* pArray, size_t index, void* pData) {
}
}
- void* dst = TARRAY_GET_ELEM(pArray, index);
+ void* dst = TARRAY_GET_ELEM(pArray, idx);
- int32_t remain = (int32_t)(pArray->size - index);
+ int32_t remain = (int32_t)(pArray->size - idx);
memmove((char*)dst + pArray->elemSize, (char*)dst, pArray->elemSize * remain);
memcpy(dst, pData, pArray->elemSize);
@@ -232,21 +232,21 @@ void* taosArrayInsert(SArray* pArray, size_t index, void* pData) {
return dst;
}
-void taosArraySet(SArray* pArray, size_t index, void* pData) {
- assert(index < pArray->size);
- memcpy(TARRAY_GET_ELEM(pArray, index), pData, pArray->elemSize);
+void taosArraySet(SArray* pArray, size_t idx, void* pData) {
+ assert(idx < pArray->size);
+ memcpy(TARRAY_GET_ELEM(pArray, idx), pData, pArray->elemSize);
}
-void taosArrayRemove(SArray* pArray, size_t index) {
- assert(index < pArray->size);
+void taosArrayRemove(SArray* pArray, size_t idx) {
+ assert(idx < pArray->size);
- if (index == pArray->size - 1) {
+ if (idx == pArray->size - 1) {
taosArrayPop(pArray);
return;
}
- size_t remain = pArray->size - index - 1;
- memmove((char*)pArray->pData + index * pArray->elemSize, (char*)pArray->pData + (index + 1) * pArray->elemSize, remain * pArray->elemSize);
+ size_t remain = pArray->size - idx - 1;
+ memmove((char*)pArray->pData + idx * pArray->elemSize, (char*)pArray->pData + (idx + 1) * pArray->elemSize, remain * pArray->elemSize);
pArray->size -= 1;
}
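The taosArrayInsert/taosArrayRemove changes are again pure renames; the shifting they perform is plain memmove arithmetic. A standalone sketch over a packed int array, assuming sufficient capacity (the real container also grows its buffer):

#include <stddef.h>
#include <string.h>

/* insert value at position idx; out-of-range inserts degrade to append, like taosArrayInsert */
static void intArrayInsert(int *data, size_t *size, size_t idx, int value) {
  if (idx >= *size) {
    data[(*size)++] = value;
    return;
  }
  memmove(&data[idx + 1], &data[idx], (*size - idx) * sizeof(int));
  data[idx] = value;
  (*size)++;
}

/* remove the element at idx, closing the gap */
static void intArrayRemove(int *data, size_t *size, size_t idx) {
  if (idx >= *size) return;
  memmove(&data[idx], &data[idx + 1], (*size - idx - 1) * sizeof(int));
  (*size)--;
}
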
diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c
index 39b674fe4f23b1e5f4ef034df7d5bb1f654d80e7..6fac32e22df06007ae7b90eb02ceb5a6fdb9e1ec 100644
--- a/src/util/src/tcache.c
+++ b/src/util/src/tcache.c
@@ -624,9 +624,9 @@ void taosTrashcanEmpty(SCacheObj *pCacheObj, bool force) {
return;
}
- const char* stat[] = {"false", "true"};
+ const char* status[] = {"false", "true"};
uDebug("cache:%s start to cleanup trashcan, numOfElem in trashcan:%d, free:%s", pCacheObj->name,
- pCacheObj->numOfElemsInTrash, (force? stat[1]:stat[0]));
+ pCacheObj->numOfElemsInTrash, (force? status[1]:status[0]));
STrashElem *pElem = pCacheObj->pTrash;
while (pElem) {
@@ -683,10 +683,10 @@ bool travHashTableFn(void* param, void* data) {
return true;
}
-static void doCacheRefresh(SCacheObj* pCacheObj, int64_t time, __cache_trav_fn_t fp, void* param1) {
+static void doCacheRefresh(SCacheObj* pCacheObj, int64_t timeStamp, __cache_trav_fn_t fp, void* param1) {
assert(pCacheObj != NULL);
- SHashTravSupp sup = {.pCacheObj = pCacheObj, .fp = fp, .time = time, .param1 = param1};
+ SHashTravSupp sup = {.pCacheObj = pCacheObj, .fp = fp, .time = timeStamp, .param1 = param1};
taosHashCondTraverse(pCacheObj->pHashTable, travHashTableFn, &sup);
}
diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c
index 2ab5ddbbe0ac1025be72e69fe88050d2aa3f73ad..565878f3e151e71a0fcf9b5b3b4b182249f51a79 100644
--- a/src/util/src/tcompare.c
+++ b/src/util/src/tcompare.c
@@ -436,9 +436,9 @@ int WCSPatternMatch(const uint32_t *patterStr, const uint32_t *str, size_t size,
return TSDB_PATTERN_MATCH;
}
- uint32_t accept[3] = {towupper(c), towlower(c), 0};
+ uint32_t accept_array[3] = {towupper(c), towlower(c), 0};
while (1) {
- size_t n = taosWcscspn(str, accept);
+ size_t n = taosWcscspn(str, accept_array);
str += n;
if (str[0] == 0 || (n >= size)) {
diff --git a/src/util/src/tmempool.c b/src/util/src/tmempool.c
index 678c965eb1a7315977616778c0e4b39ceb4c7525..b580f9d9ab3cfafa4e957e26334cd1e728387eb9 100644
--- a/src/util/src/tmempool.c
+++ b/src/util/src/tmempool.c
@@ -89,19 +89,19 @@ char *taosMemPoolMalloc(mpool_h handle) {
}
void taosMemPoolFree(mpool_h handle, char *pMem) {
- int index;
+ int idx;
pool_t *pool_p = (pool_t *)handle;
if (pMem == NULL) return;
- index = (int)(pMem - pool_p->pool) % pool_p->blockSize;
- if (index != 0) {
+ idx = (int)(pMem - pool_p->pool) % pool_p->blockSize;
+ if (idx != 0) {
uError("invalid free address:%p\n", pMem);
return;
}
- index = (int)((pMem - pool_p->pool) / pool_p->blockSize);
- if (index < 0 || index >= pool_p->numOfBlock) {
+ idx = (int)((pMem - pool_p->pool) / pool_p->blockSize);
+ if (idx < 0 || idx >= pool_p->numOfBlock) {
uError("mempool: error, invalid address:%p\n", pMem);
return;
}
@@ -110,7 +110,7 @@ void taosMemPoolFree(mpool_h handle, char *pMem) {
pthread_mutex_lock(&pool_p->mutex);
- pool_p->freeList[(pool_p->first + pool_p->numOfFree) % pool_p->numOfBlock] = index;
+ pool_p->freeList[(pool_p->first + pool_p->numOfFree) % pool_p->numOfBlock] = idx;
pool_p->numOfFree++;
pthread_mutex_unlock(&pool_p->mutex);
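The two idx checks in taosMemPoolFree above validate a freed pointer by its offset into the pool: it must be block-aligned and must fall inside the pool. A small sketch of that validation, with illustrative parameter names and stderr in place of the project's uError:

#include <stdio.h>

/* return the block index of pMem within the pool, or -1 if the address is invalid */
static int blockIndexOf(const char *pool, int blockSize, int numOfBlock, const char *pMem) {
  long offset = (long)(pMem - pool);
  if (offset % blockSize != 0) {
    fprintf(stderr, "invalid free address: not block-aligned\n");
    return -1;
  }
  long idx = offset / blockSize;
  if (idx < 0 || idx >= numOfBlock) {
    fprintf(stderr, "invalid free address: outside the pool\n");
    return -1;
  }
  return (int)idx;
}
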
diff --git a/src/util/src/tnettest.c b/src/util/src/tnettest.c
index 2094c3d4be59c5636c6f987a790c163bd61c2227..407884759a633c8947f763fd5d5d167a4eb264c8 100644
--- a/src/util/src/tnettest.c
+++ b/src/util/src/tnettest.c
@@ -314,27 +314,27 @@ static void taosNetCheckPort(uint32_t hostIp, int32_t startPort, int32_t endPort
}
void *taosNetInitRpc(char *secretEncrypt, char spi) {
- SRpcInit rpcInit;
+ SRpcInit rpcInitial;
void * pRpcConn = NULL;
char user[] = "nettestinternal";
char pass[] = "nettestinternal";
taosEncryptPass((uint8_t *)pass, strlen(pass), secretEncrypt);
- memset(&rpcInit, 0, sizeof(rpcInit));
- rpcInit.localPort = 0;
- rpcInit.label = "NT";
- rpcInit.numOfThreads = 1; // every DB connection has only one thread
- rpcInit.cfp = NULL;
- rpcInit.sessions = 16;
- rpcInit.connType = TAOS_CONN_CLIENT;
- rpcInit.user = user;
- rpcInit.idleTime = 2000;
- rpcInit.ckey = "key";
- rpcInit.spi = spi;
- rpcInit.secret = secretEncrypt;
-
- pRpcConn = rpcOpen(&rpcInit);
+ memset(&rpcInitial, 0, sizeof(rpcInitial));
+ rpcInitial.localPort = 0;
+ rpcInitial.label = "NT";
+ rpcInitial.numOfThreads = 1; // every DB connection has only one thread
+ rpcInitial.cfp = NULL;
+ rpcInitial.sessions = 16;
+ rpcInitial.connType = TAOS_CONN_CLIENT;
+ rpcInitial.user = user;
+ rpcInitial.idleTime = 2000;
+ rpcInitial.ckey = "key";
+ rpcInitial.spi = spi;
+ rpcInitial.secret = secretEncrypt;
+
+ pRpcConn = rpcOpen(&rpcInitial);
return pRpcConn;
}
diff --git a/src/util/src/tqueue.c b/src/util/src/tqueue.c
index 1ffa94b0df6b63dac914649c7003d37bbedbdb24..7b23b708b1ea9b480d8b7fecfb5e41bbdbafe9b7 100644
--- a/src/util/src/tqueue.c
+++ b/src/util/src/tqueue.c
@@ -86,9 +86,8 @@ void taosCloseQueue(taos_queue param) {
}
pthread_mutex_destroy(&queue->mutex);
- free(queue);
-
uTrace("queue:%p is closed", queue);
+ free(queue);
}
void *taosAllocateQitem(int size) {
diff --git a/src/util/src/tref.c b/src/util/src/tref.c
index 33323889c68162219b3c6faf886ac29b2a975ffa..bff8b12aaefc1734318e891efab7a9b02e6557f4 100644
--- a/src/util/src/tref.c
+++ b/src/util/src/tref.c
@@ -54,7 +54,7 @@ static void taosLockList(int64_t *lockedBy);
static void taosUnlockList(int64_t *lockedBy);
static void taosIncRsetCount(SRefSet *pSet);
static void taosDecRsetCount(SRefSet *pSet);
-static int taosDecRefCount(int rsetId, int64_t rid, int remove);
+static int taosDecRefCount(int rsetId, int64_t rid, int rm);
int taosOpenRef(int max, void (*fp)(void *))
{
@@ -389,7 +389,7 @@ int taosListRef() {
return num;
}
-static int taosDecRefCount(int rsetId, int64_t rid, int remove) {
+static int taosDecRefCount(int rsetId, int64_t rid, int rm) {
int hash;
SRefSet *pSet;
SRefNode *pNode;
@@ -428,7 +428,7 @@ static int taosDecRefCount(int rsetId, int64_t rid, int remove) {
if (pNode) {
pNode->count--;
- if (remove) pNode->removed = 1;
+ if (rm) pNode->removed = 1;
if (pNode->count <= 0) {
if (pNode->prev) {
diff --git a/src/util/src/tthread.c b/src/util/src/tthread.c
index 043b2de2f241297d209041294428dde2c55e974e..f77dea592e8454dcc15e05f5c03c9db56e0ccc6b 100644
--- a/src/util/src/tthread.c
+++ b/src/util/src/tthread.c
@@ -38,7 +38,7 @@ pthread_t* taosCreateThread( void *(*__start_routine) (void *), void* param) {
}
// destory thread
-bool taosDestoryThread(pthread_t* pthread) {
+bool taosDestroyThread(pthread_t* pthread) {
if(pthread == NULL) return false;
if(taosThreadRunning(pthread)) {
pthread_cancel(*pthread);
diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c
index 81ab56ccea1a585ae7bf89a57244edd25818c0d5..f215453f740b979e5b71a4d59a2698b6dd569ff7 100644
--- a/src/vnode/src/vnodeMain.c
+++ b/src/vnode/src/vnodeMain.c
@@ -461,7 +461,7 @@ void vnodeStopWaitingThread(SVnodeObj* pVnode) {
if(loop == 0) {
vInfo("vgId:%d :SDEL force kill thread to quit. pthread=%p pWrite=%p", pVnode->vgId, pWaitThread->pthread, pWaitThread->param);
// thread not stop , so need kill
- taosDestoryThread(pWaitThread->pthread);
+ taosDestroyThread(pWaitThread->pthread);
// write msg need remove from queue
SVWriteMsg* pWrite = (SVWriteMsg* )pWaitThread->param;
if (pWrite)
@@ -586,9 +586,9 @@ void vnodeCleanUp(SVnodeObj *pVnode) {
// stop replication module
if (pVnode->sync > 0) {
- int64_t sync = pVnode->sync;
+ int64_t syncRid = pVnode->sync;
pVnode->sync = -1;
- syncStop(sync);
+ syncStop(syncRid);
}
vDebug("vgId:%d, vnode is cleaned, refCount:%d pVnode:%p", pVnode->vgId, pVnode->refCount, pVnode);
@@ -692,4 +692,4 @@ bool vnodeWaitTooMany(void* vparam) {
tsem_t* vnodeSemWait(void* vparam) {
SVnodeObj* pVnode = (SVnodeObj* )vparam;
return &pVnode->semWait;
-}
\ No newline at end of file
+}
diff --git a/src/vnode/src/vnodeSync.c b/src/vnode/src/vnodeSync.c
index 2bdfd2ead3a31d8c2cba94d93239de965d2e07dc..6edcadcf715907bc69090c0e6b3a396020057c8a 100644
--- a/src/vnode/src/vnodeSync.c
+++ b/src/vnode/src/vnodeSync.c
@@ -22,7 +22,7 @@
#include "vnodeMain.h"
#include "vnodeStatus.h"
-uint32_t vnodeGetFileInfo(int32_t vgId, char *name, uint32_t *index, uint32_t eindex, int64_t *size, uint64_t *fver) {
+uint32_t vnodeGetFileInfo(int32_t vgId, char *name, uint32_t *idx, uint32_t eindex, int64_t *size, uint64_t *fver) {
SVnodeObj *pVnode = vnodeAcquire(vgId);
if (pVnode == NULL) {
vError("vgId:%d, vnode not found while get file info", vgId);
@@ -30,7 +30,7 @@ uint32_t vnodeGetFileInfo(int32_t vgId, char *name, uint32_t *index, uint32_t ei
}
*fver = pVnode->fversion;
- uint32_t ret = tsdbGetFileInfo(pVnode->tsdb, name, index, eindex, size);
+ uint32_t ret = tsdbGetFileInfo(pVnode->tsdb, name, idx, eindex, size);
vnodeRelease(pVnode);
return ret;
diff --git a/src/wal/test/waltest.c b/src/wal/test/waltest.c
index 505728fbe4c4a6fbc126aa18ff6db93a28388173..ffb9767bb4333f52237af2ddb1893b129bb2c1af 100644
--- a/src/wal/test/waltest.c
+++ b/src/wal/test/waltest.c
@@ -113,17 +113,17 @@ int main(int argc, char *argv[]) {
printf("%d wal files are written\n", total);
- int64_t index = 0;
+ int64_t idx = 0;
char name[256];
while (1) {
- int code = walGetWalFile(pWal, name, &index);
+ int code = walGetWalFile(pWal, name, &idx);
if (code == -1) {
- printf("failed to get wal file, index:%" PRId64 "\n", index);
+ printf("failed to get wal file, index:%" PRId64 "\n", idx);
break;
}
- printf("index:%" PRId64 " wal:%s\n", index, name);
+ printf("index:%" PRId64 " wal:%s\n", idx, name);
if (code == 0) break;
}
diff --git a/tests/develop-test/3-connectors/R/test.sh b/tests/develop-test/3-connectors/R/test.sh
index 90b94893659f04328d7eaef810018bb6a13c8c09..dd4577a35678b8d3435dc7835c21a926bb00e12d 100644
--- a/tests/develop-test/3-connectors/R/test.sh
+++ b/tests/develop-test/3-connectors/R/test.sh
@@ -22,7 +22,9 @@ cd ../../
WKC=`pwd`
#echo "WKC:${WKC}"
-JDBC_PATH=${WKC}'/src/connector/jdbc/'
+git clone git@github.com:taosdata/taos-connector-jdbc.git --branch 2.0 --single-branch --depth 1
+
+JDBC_PATH=${WKC}'/taos-connector-jdbc/'
CASE_PATH=${WKC}'/tests/examples/R/'
cd ${JDBC_PATH}
#echo "JDBC_PATH:${JDBC_PATH}"
diff --git a/tests/develop-test/3-connectors/c#/test.sh b/tests/develop-test/3-connectors/c#/test.sh
index 8cfb3fe4fcff6ab820b53698e508189e557676ca..f77536c1fd8ba6595788fbcbcb5288bd72764e45 100755
--- a/tests/develop-test/3-connectors/c#/test.sh
+++ b/tests/develop-test/3-connectors/c#/test.sh
@@ -15,25 +15,45 @@ rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
nohup taosd -c /etc/taos/ > /dev/null 2>&1 &
sleep 10
+
+# define a function to check whether the previous command executed correctly.
+check(){
+if [ $1 -eq 0 ]
+then
+ echo "===================$2 succeed==================="
+else
+ echo "===================$2 failed==================="
+ exit 1
+fi
+}
+
cd ../../
WKC=`pwd`
-cd ${WKC}/src/connector/C#
-dotnet test
-# run example under Driver
-cd ${WKC}/src/connector/C#/examples
-dotnet run
-
-#dotnet run --project src/test/Cases/Cases.csproj
+echo "WKC:${WKC}"
# run example with nuget package
cd ${WKC}/tests/examples/C#
+
dotnet run --project C#checker/C#checker.csproj
+check $? C#checker.csproj
+
dotnet run --project TDengineTest/TDengineTest.csproj
+check $? TDengineTest.csproj
+
dotnet run --project schemaless/schemaless.csproj
+check $? schemaless.csproj
+
dotnet run --project jsonTag/jsonTag.csproj
+check $? jsonTag.csproj
+
dotnet run --project stmt/stmt.csproj
+check $? stmt.csproj
+
+dotnet run --project insertCn/insertCn.csproj
+check $? insertCn.csproj
cd ${WKC}/tests/examples/C#/taosdemo
dotnet build -c Release
tree | true
./bin/Release/net5.0/taosdemo -c /etc/taos -y
+check $? taosdemo
diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/insert_alltypes_json.py b/tests/develop-test/5-taos-tools/taosbenchmark/insert_alltypes_json.py
index 5239d1b5fbc392a1d1b8b2d63cd1808ae4d97d80..c86d5300f59e66eb680579a63e075de099aabff7 100644
--- a/tests/develop-test/5-taos-tools/taosbenchmark/insert_alltypes_json.py
+++ b/tests/develop-test/5-taos-tools/taosbenchmark/insert_alltypes_json.py
@@ -117,8 +117,6 @@ class TDTestCase:
tdSql.checkData(0, 0, 160)
tdSql.query("select count(*) from db.stb where c13 = 'b1' or c13 = 'b2'")
tdSql.checkData(0, 0, 160)
- tdSql.query("select count(*) from db.stb where t0 >= 0 and t0 <= 10")
- tdSql.checkData(0, 0, 160)
tdSql.query("select count(*) from db.stb where t1 >= 0 and t1 <= 10")
tdSql.checkData(0, 0, 160)
tdSql.query("select count(*) from db.stb where t2 >= 0 and t2 <= 10")
@@ -326,4 +324,4 @@ class TDTestCase:
tdCases.addWindows(__file__, TDTestCase())
-tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/limit_offset_json.py b/tests/develop-test/5-taos-tools/taosbenchmark/limit_offset_json.py
index b7f3fcd8262d171ee16f4e7b8f0f5ebb7aa84b38..e2160f3c9c23db2e27d54b1559a4b808a9968d31 100644
--- a/tests/develop-test/5-taos-tools/taosbenchmark/limit_offset_json.py
+++ b/tests/develop-test/5-taos-tools/taosbenchmark/limit_offset_json.py
@@ -79,8 +79,6 @@ class TDTestCase:
tdSql.checkData(0, 0, 8)
tdSql.query("select count(*) from db.stb")
tdSql.checkData(0, 0, 40)
- tdSql.query("select distinct(c1) from db.stb")
- tdSql.checkData(0, 0, None)
tdSql.query("select distinct(c3) from db.stb")
tdSql.checkData(0, 0, None)
tdSql.query("select distinct(c4) from db.stb")
diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task
index 2174437f319d2ff812dccd6ee27fb7c87886b9e8..c2c462324e0cdc02b248ac704064d7c2ec76dfeb 100644
--- a/tests/parallel_test/cases.task
+++ b/tests/parallel_test/cases.task
@@ -1,4 +1,5 @@
# 20,,pytest,python3 insert/retentionpolicy.py change date time
+500,,docs-examples-test,./test_node.sh
299,,pytest,python3 test.py -f update/merge_commit_data-0.py
290,,pytest,python3 test.py -f update/merge_commit_data.py
241,,pytest,python3 test.py -f update/merge_commit_data2.py
@@ -236,6 +237,7 @@
30,,script,./test.sh -f general/import/commit.sim
30,,script,./test.sh -f general/compute/diff2.sim
30,,develop-test,bash 3-connectors/R/test.sh
+30,,develop-test,bash 3-connectors/c#/test.sh
29,,system-test,python3 ./test.py -f 0-others/create_col_tag.py
29,,script,./test.sh -f unique/arbitrator/dn3_mn1_full_createTableFail.sim
29,,script,./test.sh -f general/wal/maxtables.sim
@@ -592,7 +594,6 @@
8,,develop-test,python3 ./test.py -f 5-taos-tools/taosdump/taosdumpTestTypeInt.py
8,,develop-test,python3 ./test.py -f 5-taos-tools/taosdump/taosdumpTestTypeDouble.py
8,,pytest,python3 test.py -f update/update2.py
-7,,docs-examples-test,./test_node.sh
7,,system-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/taosdemoTestInsertWithJsonSml-otherPara.py
7,,pytest,python3 test.py -f tools/taosdumpTest2.py
7,,pytest,python3 test.py -f tools/taosdemoTestdatatype.py
@@ -813,6 +814,8 @@
3,,pytest,python3 test.py -f table/columnNameValidation.py
3,,pytest,python3 test.py -f table/tagNameCaseSensitive.py
3,,pytest,python3 test.py -f table/tbNameCaseSensitive.py
+3,,pytest,python3 test.py -f functions/function_max_row.py
+3,,pytest,python3 test.py -f functions/function_min_row.py
3,,develop-test,python3 ./test.py -f 2-query/ts_hidden_column.py
3,,develop-test,python3 ./test.py -f 2-query/ts_shortcut.py
3,,develop-test,python3 ./test.py -f 2-query/nchar_funcs.py
diff --git a/tests/pytest/functions/function_max_row.py b/tests/pytest/functions/function_max_row.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ffa9858b93ba5f7bddcee55ebaabf138201c172
--- /dev/null
+++ b/tests/pytest/functions/function_max_row.py
@@ -0,0 +1,84 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+import numpy as np
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor())
+
+ self.rowNum = 10
+ self.tables = 10
+ self.ts = 1537146000000
+
+ def run(self):
+ tdSql.prepare()
+
+ intData = []
+ floatData = []
+
+ tdSql.execute("create table stb (ts timestamp, c1 int, c2 double, c3 float) tags(t1 int)")
+ for i in range(self.tables):
+ tdSql.execute("create table tb%d using stb tags(%d)" % (i, i))
+ sql = "insert into tb%d values" % i
+ for j in range(self.rowNum):
+ sql += "(%d, %d, %f, %f)" % (self.ts + j * 3000, j, j + 0.1, j + 0.1)
+ intData.append(j)
+ floatData.append(j + 0.1)
+ tdSql.execute(sql)
+
+ tdSql.error("select max_row(ts) from stb")
+ tdSql.error("select max_row(t1) from stb")
+
+ tdSql.query("select max_row(c1) from stb")
+ tdSql.checkData(0, 0, np.max(intData))
+
+ tdSql.query("select max_row(c1), * from stb")
+ tdSql.checkData(0, 0, np.max(intData))
+ tdSql.checkData(0, 2, np.max(intData))
+ tdSql.checkData(0, 3, np.max(floatData))
+ tdSql.checkData(0, 4, np.max(floatData))
+
+ tdSql.query("select max_row(c1), * from stb group by tbname")
+ for i in range(self.tables):
+ tdSql.checkData(i, 0, np.max(intData))
+ tdSql.checkData(i, 2, np.max(intData))
+ tdSql.checkData(i, 3, np.max(floatData))
+ tdSql.checkData(i, 4, np.max(floatData))
+
+ tdSql.query("select max_row(c1), * from stb interval(6s)")
+ tdSql.checkRows(5)
+
+ tdSql.query("select max_row(c1), * from tb1 interval(6s)")
+ tdSql.checkRows(5)
+
+ tdSql.query("select max_row(c1), * from stb interval(6s) group by tbname")
+ tdSql.checkRows(50)
+
+ tdSql.query("select max_row(c1), * from (select min_row(c1) c1, * from stb group by tbname)")
+ tdSql.checkData(0, 0, np.min(intData))
+ tdSql.checkRows(1)
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
diff --git a/tests/pytest/functions/function_min_row.py b/tests/pytest/functions/function_min_row.py
new file mode 100644
index 0000000000000000000000000000000000000000..9acc0eee5b638eb7c3312b0afe4bfe96a87f5746
--- /dev/null
+++ b/tests/pytest/functions/function_min_row.py
@@ -0,0 +1,84 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+import numpy as np
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor())
+
+ self.rowNum = 10
+ self.tables = 10
+ self.ts = 1537146000000
+
+ def run(self):
+ tdSql.prepare()
+
+ intData = []
+ floatData = []
+
+ tdSql.execute("create table stb (ts timestamp, c1 int, c2 double, c3 float) tags(t1 int)")
+ for i in range(self.tables):
+ tdSql.execute("create table tb%d using stb tags(%d)" % (i, i))
+ sql = "insert into tb%d values" % i
+ for j in range(self.rowNum):
+ sql += "(%d, %d, %f, %f)" % (self.ts + j * 3000, j, j + 0.1, j + 0.1)
+ intData.append(j)
+ floatData.append(j + 0.1)
+ tdSql.execute(sql)
+
+ tdSql.error("select min_row(ts) from stb")
+ tdSql.error("select min_row(t1) from stb")
+
+ tdSql.query("select min_row(c1) from stb")
+ tdSql.checkData(0, 0, np.min(intData))
+
+ tdSql.query("select min_row(c1), * from stb")
+ tdSql.checkData(0, 0, np.min(intData))
+ tdSql.checkData(0, 2, np.min(intData))
+ tdSql.checkData(0, 3, np.min(floatData))
+ tdSql.checkData(0, 4, np.min(floatData))
+
+ tdSql.query("select min_row(c1), * from stb group by tbname")
+ for i in range(self.tables):
+ tdSql.checkData(i, 0, np.min(intData))
+ tdSql.checkData(i, 2, np.min(intData))
+ tdSql.checkData(i, 3, np.min(floatData))
+ tdSql.checkData(i, 4, np.min(floatData))
+
+ tdSql.query("select min_row(c1), * from stb interval(6s)")
+ tdSql.checkRows(5)
+
+ tdSql.query("select min_row(c1), * from tb1 interval(6s)")
+ tdSql.checkRows(5)
+
+ tdSql.query("select min_row(c1), * from stb interval(6s) group by tbname")
+ tdSql.checkRows(50)
+
+ tdSql.query("select min_row(c1), * from (select max_row(c1) c1, * from stb group by tbname)")
+ tdSql.checkData(0, 0, np.max(intData))
+ tdSql.checkRows(1)
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
diff --git a/tests/pytest/query/nestedQuery/nestedQuery.py b/tests/pytest/query/nestedQuery/nestedQuery.py
index 89751bb7b808002b42e09d4a6bee2ef16e7ac775..9f9f56660cacd2b38d6c4f20b5da2cdc07e462dd 100755
--- a/tests/pytest/query/nestedQuery/nestedQuery.py
+++ b/tests/pytest/query/nestedQuery/nestedQuery.py
@@ -2233,10 +2233,10 @@ class TDTestCase:
sql = "select * from ( select ts , "
for i in range(4094):
sql += "c%d , " % (i)
- sql += "c4094 from d0 "
+ sql += "c4094 from d0 "
sql += " %s )" % random.choice(order_where)
sql += " %s ;" % random.choice(order_desc_where)
- tdLog.info(len(sql))
+ tdLog.info(len(sql))
tdSql.query(sql)
tdSql.checkCols(4096)
tdSql.checkRows(1000)
diff --git a/tests/pytest/query/queryBase.py b/tests/pytest/query/queryBase.py
index 4544fab3adcb6e760dcbc05ab56cd22edd35b3e2..9be950df49e7b2e34f88edaafd91fac37aa8a009 100644
--- a/tests/pytest/query/queryBase.py
+++ b/tests/pytest/query/queryBase.py
@@ -171,6 +171,11 @@ class TDTestCase:
tdSql.waitedQuery(sql, 1, WAITS)
tdSql.checkData(0, 1, 229400)
+ # TS-1664
+ tdSql.error("create database string")
+ tdSql.error("create table string(ts timestamp, c1 int)")
+ tdSql.error("select * from string")
+
#
# add case with filename
#
diff --git a/tests/pytest/query/queryError.py b/tests/pytest/query/queryError.py
index e5c468600ba56a251057f204971084fe2844a85e..7d752a9863312e42e303e5db34e18ad740bc5a19 100644
--- a/tests/pytest/query/queryError.py
+++ b/tests/pytest/query/queryError.py
@@ -68,6 +68,16 @@ class TDTestCase:
# TD-6006
tdSql.error("select * from dev_001 where 'name' is not null")
tdSql.error("select * from dev_001 where \"name\" = 'first'")
+
+ # TS-1577
+ tdSql.query("show databases")
+ rows = tdSql.queryRows
+
+ for i in range(1000):
+ tdSql.execute("create database test%d" % i)
+
+ tdSql.query("show databases")
+ tdSql.checkRows(rows + 1000)
def stop(self):
tdSql.close()
diff --git a/tests/pytest/table/tagNameCaseSensitive.py b/tests/pytest/table/tagNameCaseSensitive.py
index ebd7f55a2d569223d81f0fc6c92eb4e87424d8d0..c9ee64fa242484e64c9fa6cfdb6ed468436f2199 100644
--- a/tests/pytest/table/tagNameCaseSensitive.py
+++ b/tests/pytest/table/tagNameCaseSensitive.py
@@ -65,7 +65,7 @@ class TDTestCase:
tdSql.query("select * from `STB6`")
tdSql.checkRows(6)
- tdSql.execute("delete from `STB6` where ` ` = 1 and ts = '2022-06-24 11:17:31.000'")
+ tdSql.execute("delete from `STB6` where ` ` = 1 and ts = 1656040651000")
tdSql.checkAffectedRows(1)
tdSql.query("select * from `STB6`")
tdSql.checkRows(5)
@@ -74,6 +74,10 @@ class TDTestCase:
tdSql.query("select * from `STB6`")
tdSql.checkRows(2)
+ tdSql.execute("alter table `STB6` add tag `1` int")
+ tdSql.execute("create table t1 using `STB6`(`1`) tags(1)")
+ tdSql.error("alter table t1 set tag 1=2222")
+
tdSql.error("alter table `STB6` add tag `` nchar(20)")
def stop(self):
@@ -82,4 +86,4 @@ class TDTestCase:
tdCases.addWindows(__file__, TDTestCase())
-tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py
index c047e4b0aadcc27e0014420c2d350f106125109c..667b859c8f35b492d96e7e7633ab8c728f09335a 100755
--- a/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py
+++ b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py
@@ -56,18 +56,18 @@ class TDTestCase:
tdSql.execute("use regular_old")
tdSql.query("show tables;")
tdSql.checkRows(1)
- tdSql.query("select * from d0;")
+ tdSql.query("select * from meters;")
tdSql.checkCols(1024)
- tdSql.query("describe d0;")
+ tdSql.query("describe meters;")
tdSql.checkRows(1024)
os.system("%s -N -d regular_new -t 1 -n 10 -l 4095 -y" % binPath)
tdSql.execute("use regular_new")
tdSql.query("show tables;")
tdSql.checkRows(1)
- tdSql.query("select * from d0;")
+ tdSql.query("select * from meters;")
tdSql.checkCols(4096)
- tdSql.query("describe d0;")
+ tdSql.query("describe meters;")
tdSql.checkRows(4096)
# super table -d:database name -t:table num -n:rows num per table
diff --git a/tests/pytest/tools/taosdumpTest.py b/tests/pytest/tools/taosdumpTest.py
index 21555567765fcae218b254472b1330a11d83e77f..95ed69b9177a079a59002d87d4e6bccbd9f6dc9a 100644
--- a/tests/pytest/tools/taosdumpTest.py
+++ b/tests/pytest/tools/taosdumpTest.py
@@ -60,6 +60,13 @@ class TDTestCase:
else:
print("directory exists")
+ for i in range(1, 9):
+ if not os.path.exists("./taosdumptest/tmp%d" % i):
+ os.makedirs("./taosdumptest/tmp%d" % i)
+ else:
+ os.system("rm -rf ./taosdumptest/tmp%d" % i)
+ os.makedirs("./taosdumptest/tmp%d" % i)
+
if not os.path.exists("./taosdumptest/tmp2"):
os.makedirs("./taosdumptest/tmp2")
tdSql.execute("drop database if exists db")
diff --git a/tests/pytest/tools/taosdumpTest3.py b/tests/pytest/tools/taosdumpTest3.py
index 3994ad0323a3e1b5b968999178f1cae83c5e0753..e8fb46f3a8740d1c0f0360ad6bf93c51475b5603 100644
--- a/tests/pytest/tools/taosdumpTest3.py
+++ b/tests/pytest/tools/taosdumpTest3.py
@@ -57,6 +57,9 @@ class TDTestCase:
def run(self):
if not os.path.exists("./taosdumptest"):
os.makedirs("./taosdumptest")
+ else:
+ print("directory exists")
+
for i in range(1, 9):
if not os.path.exists("./taosdumptest/tmp%d" % i):
os.makedirs("./taosdumptest/tmp%d" % i)
diff --git a/tests/script/unique/account/paras.sim b/tests/script/unique/account/paras.sim
index 102f5b6a381e5100b35a4f0125b1318bcb8b1d76..77e010f5bd9269e868c4f963e7c920c41fade8a8 100644
--- a/tests/script/unique/account/paras.sim
+++ b/tests/script/unique/account/paras.sim
@@ -17,7 +17,7 @@ endi
if $data02 != 3/128 then
return -1
endi
-if $data03 != 0/128 then
+if $data03 != 0/32767 then
return -1
endi
if $data04 != 0/2147483647 then
@@ -111,4 +111,4 @@ if $data16 != 0.000/10.000 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/system-test/5-taos-tools/taosbenchmark/TD-5213/insert4096columns_not_use_taosdemo.py b/tests/system-test/5-taos-tools/taosbenchmark/TD-5213/insert4096columns_not_use_taosdemo.py
deleted file mode 100644
index ec55acb848352def34e3090e66c4ef392b737ce0..0000000000000000000000000000000000000000
--- a/tests/system-test/5-taos-tools/taosbenchmark/TD-5213/insert4096columns_not_use_taosdemo.py
+++ /dev/null
@@ -1,706 +0,0 @@
-###################################################################
-# Copyright (c) 2016 by TAOS Technologies, Inc.
-# All rights reserved.
-#
-# This file is proprietary and confidential to TAOS Technologies.
-# No part of this file may be reproduced, stored, transmitted,
-# disclosed or used in any form or by any means other than as
-# expressly provided by the written permission from Jianhui Tao
-#
-###################################################################
-
-# -*- coding: utf-8 -*-
-
-import random
-import string
-import os
-import time
-from util.log import tdLog
-from util.cases import tdCases
-from util.sql import tdSql
-from util.dnodes import tdDnodes
-
-class TDTestCase:
- updatecfgDict={'maxSQLLength':1048576}
- def init(self, conn, logSql):
- tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor(), logSql)
-
- os.system("rm -rf tools/taosdemoAllTest/TD-5213/insert4096columns_not_use_taosdemo.py.sql")
-
- now = time.time()
- self.ts = int(round(now * 1000))
- self.num = 100
-
- def get_random_string(self, length):
- letters = string.ascii_lowercase
- result_str = ''.join(random.choice(letters) for i in range(length))
- return result_str
-
- def run(self):
- tdSql.prepare()
- # test case for https://jira.taosdata.com:18080/browse/TD-5213
-
- print("==============step1, regular table, 1 ts + 4094 cols + 1 binary==============")
- startTime = time.time()
- sql = "create table regular_table_1(ts timestamp, "
- for i in range(4094):
- sql += "col%d int, " % (i + 1)
- sql += "col4095 binary(22))"
- tdLog.info(len(sql))
- tdSql.execute(sql)
-
- for i in range(self.num):
- sql = "insert into regular_table_1 values(%d, "
- for j in range(4094):
- str = "'%s', " % random.randint(0,1000)
- sql += str
- sql += "'%s')" % self.get_random_string(22)
- tdSql.execute(sql % (self.ts + i))
- time.sleep(1)
- tdSql.query("select count(*) from regular_table_1")
- tdSql.checkData(0, 0, self.num)
- tdSql.query("select * from regular_table_1")
- tdSql.checkRows(self.num)
- tdSql.checkCols(4096)
-
- endTime = time.time()
- print("total time %ds" % (endTime - startTime))
-
- #insert in order
- tdLog.info('test insert in order')
- for i in range(self.num):
- sql = "insert into regular_table_1 (ts,col1,col2,col3,col4,col5,col6,col7,col8,col9,col10,col4095) values(%d, "
- for j in range(10):
- str = "'%s', " % random.randint(0,1000)
- sql += str
- sql += "'%s')" % self.get_random_string(22)
- tdSql.execute(sql % (self.ts + i + 1000))
- time.sleep(1)
- tdSql.query("select count(*) from regular_table_1")
- tdSql.checkData(0, 0, 2*self.num)
- tdSql.query("select * from regular_table_1")
- tdSql.checkRows(2*self.num)
- tdSql.checkCols(4096)
-
- #insert out of order
- tdLog.info('test insert out of order')
- for i in range(self.num):
- sql = "insert into regular_table_1 (ts,col123,col2213,col331,col41,col523,col236,col71,col813,col912,col1320,col4095) values(%d, "
- for j in range(10):
- str = "'%s', " % random.randint(0,1000)
- sql += str
- sql += "'%s')" % self.get_random_string(22)
- tdSql.execute(sql % (self.ts + i + 2000))
- time.sleep(1)
- tdSql.query("select count(*) from regular_table_1")
- tdSql.checkData(0, 0, 3*self.num)
- tdSql.query("select * from regular_table_1")
- tdSql.checkRows(3*self.num)
- tdSql.checkCols(4096)
-
-
- print("==============step2,regular table error col or value==============")
- tdLog.info('test regular table exceeds row num')
- # column > 4096
- sql = "create table regular_table_2(ts timestamp, "
- for i in range(4095):
- sql += "col%d int, " % (i + 1)
- sql += "col4096 binary(22))"
- tdLog.info(len(sql))
- tdSql.error(sql)
-
- # column > 4096
- sql = "insert into regular_table_1 values(%d, "
- for j in range(4095):
- str = "'%s', " % random.randint(0,1000)
- sql += str
- sql += "'%s')" % self.get_random_string(22)
- tdSql.error(sql)
-
- # insert column < 4096
- sql = "insert into regular_table_1 values(%d, "
- for j in range(4092):
- str = "'%s', " % random.randint(0,1000)
- sql += str
- sql += "'%s')" % self.get_random_string(22)
- tdSql.error(sql)
-
- # alter column > 4096
- sql = "alter table regular_table_1 add column max int; "
- tdSql.error(sql)
-
- print("==============step3,regular table , mix data type==============")
- startTime = time.time()
- sql = "create table regular_table_3(ts timestamp, "
- for i in range(2000):
- sql += "col%d int, " % (i + 1)
- for i in range(2000,4094):
- sql += "col%d bigint, " % (i + 1)
- sql += "col4095 binary(22))"
- tdLog.info(len(sql))
- tdSql.execute(sql)
-
- for i in range(self.num):
- sql = "insert into regular_table_3 values(%d, "
- for j in range(4094):
- str = "'%s', " % random.randint(0,1000)
- sql += str
- sql += "'%s')" % self.get_random_string(22)
- tdSql.execute(sql % (self.ts + i))
- time.sleep(1)
- tdSql.query("select count(*) from regular_table_3")
- tdSql.checkData(0, 0, self.num)
- tdSql.query("select * from regular_table_3")
- tdSql.checkRows(self.num)
- tdSql.checkCols(4096)
-
- endTime = time.time()
- print("total time %ds" % (endTime - startTime))
-
- sql = "create table regular_table_4(ts timestamp, "
- for i in range(500):
- sql += "int_%d int, " % (i + 1)
- for i in range(500,1000):
- sql += "smallint_%d smallint, " % (i + 1)
- for i in range(1000,1500):
- sql += "tinyint_%d tinyint, " % (i + 1)
- for i in range(1500,2000):
- sql += "double_%d double, " % (i + 1)
- for i in range(2000,2500):
- sql += "float_%d float, " % (i + 1)
- for i in range(2500,3000):
- sql += "bool_%d bool, " % (i + 1)
- for i in range(3000,3500):
- sql += "bigint_%d bigint, " % (i + 1)
- for i in range(3500,3800):
- sql += "nchar_%d nchar(4), " % (i + 1)
- for i in range(3800,4090):
- sql += "binary_%d binary(10), " % (i + 1)
- for i in range(4090,4094):
- sql += "timestamp_%d timestamp, " % (i + 1)
- sql += "col4095 binary(22))"
- tdLog.info(len(sql))
- tdSql.execute(sql)
-
- for i in range(self.num):
- sql = "insert into regular_table_4 values(%d, "
- for j in range(500):
- str = "'%s', " % random.randint(-2147483647,2147483647)
- sql += str
- for j in range(500,1000):
- str = "'%s', " % random.randint(-32767,32767 )
- sql += str
- for j in range(1000,1500):
- str = "'%s', " % random.randint(-127,127)
- sql += str
- for j in range(1500,2000):
- str = "'%s', " % random.randint(-922337203685477580700,922337203685477580700)
- sql += str
- for j in range(2000,2500):
- str = "'%s', " % random.randint(-92233720368547758070,92233720368547758070)
- sql += str
- for j in range(2500,3000):
- str = "'%s', " % random.choice(['true','false'])
- sql += str
- for j in range(3000,3500):
- str = "'%s', " % random.randint(-9223372036854775807,9223372036854775807)
- sql += str
- for j in range(3500,3800):
- str = "'%s', " % self.get_random_string(4)
- sql += str
- for j in range(3800,4090):
- str = "'%s', " % self.get_random_string(10)
- sql += str
- for j in range(4090,4094):
- str = "%s, " % (self.ts + j)
- sql += str
- sql += "'%s')" % self.get_random_string(22)
- tdSql.execute(sql % (self.ts + i))
- time.sleep(1)
- tdSql.query("select count(*) from regular_table_4")
- tdSql.checkData(0, 0, self.num)
- tdSql.query("select * from regular_table_4")
- tdSql.checkRows(self.num)
- tdSql.checkCols(4096)
- tdLog.info("end ,now new one")
-
- #insert null value
- tdLog.info('test insert null value')
- for i in range(self.num):
- sql = "insert into regular_table_4 values(%d, "
- for j in range(2500):
- str = "'%s', " % random.choice(['NULL' ,'NULL' ,'NULL' ,1 , 10 ,100 ,-100 ,-10, 88 ,66 ,'NULL' ,'NULL' ,'NULL' ])
- sql += str
- for j in range(2500,3000):
- str = "'%s', " % random.choice(['true' ,'false'])
- sql += str
- for j in range(3000,3500):
- str = "'%s', " % random.randint(-9223372036854775807,9223372036854775807)
- sql += str
- for j in range(3500,3800):
- str = "'%s', " % self.get_random_string(4)
- sql += str
- for j in range(3800,4090):
- str = "'%s', " % self.get_random_string(10)
- sql += str
- for j in range(4090,4094):
- str = "%s, " % (self.ts + j)
- sql += str
- sql += "'%s')" % self.get_random_string(22)
- tdSql.execute(sql % (self.ts + i + 10000))
- time.sleep(1)
- tdSql.query("select count(*) from regular_table_4")
- tdSql.checkData(0, 0, 2*self.num)
- tdSql.query("select * from regular_table_4")
- tdSql.checkRows(2*self.num)
- tdSql.checkCols(4096)
-
- #insert in order
- tdLog.info('test insert in order')
- for i in range(self.num):
- sql = "insert into regular_table_4 (ts,int_2,int_22,int_169,smallint_537,smallint_607,tinyint_1030,tinyint_1491,double_1629,double_1808,float_2075,col4095) values(%d, "
- for j in range(10):
- str = "'%s', " % random.randint(0,100)
- sql += str
- sql += "'%s')" % self.get_random_string(22)
- tdSql.execute(sql % (self.ts + i + 1000))
- time.sleep(1)
- tdSql.query("select count(*) from regular_table_4")
- tdSql.checkData(0, 0, 3*self.num)
- tdSql.query("select * from regular_table_4")
- tdSql.checkRows(3*self.num)
- tdSql.checkCols(4096)
-
- #insert out of order
- tdLog.info('test insert out of order')
- for i in range(self.num):
- sql = "insert into regular_table_4 (ts,int_169,float_2075,int_369,tinyint_1491,tinyint_1030,float_2360,smallint_537,double_1808,double_1608,double_1629,col4095) values(%d, "
- for j in range(10):
- str = "'%s', " % random.randint(0,100)
- sql += str
- sql += "'%s')" % self.get_random_string(22)
- tdSql.execute(sql % (self.ts + i + 2000))
- time.sleep(1)
- tdSql.query("select count(*) from regular_table_4")
- tdSql.checkData(0, 0, 4*self.num)
- tdSql.query("select * from regular_table_4")
- tdSql.checkRows(4*self.num)
- tdSql.checkCols(4096)
-
- #define TSDB_MAX_BYTES_PER_ROW 49151[old:1024 && 16384]
- #ts:8\int:4\smallint:2\bigint:8\bool:1\float:4\tinyint:1\nchar:4*()+2[offset]\binary:1*()+2[offset]
- tdLog.info('test regular_table max bytes per row 49151')
- sql = "create table regular_table_5(ts timestamp, "
- for i in range(500):
- sql += "int_%d int, " % (i + 1)
- for i in range(500,1000):
- sql += "smallint_%d smallint, " % (i + 1)
- for i in range(1000,1500):
- sql += "tinyint_%d tinyint, " % (i + 1)
- for i in range(1500,2000):
- sql += "double_%d double, " % (i + 1)
- for i in range(2000,2500):
- sql += "float_%d float, " % (i + 1)
- for i in range(2500,3000):
- sql += "bool_%d bool, " % (i + 1)
- for i in range(3000,3500):
- sql += "bigint_%d bigint, " % (i + 1)
- for i in range(3500,3800):
- sql += "nchar_%d nchar(20), " % (i + 1)
- for i in range(3800,4090):
- sql += "binary_%d binary(34), " % (i + 1)
- for i in range(4090,4094):
- sql += "timestamp_%d timestamp, " % (i + 1)
- sql += "col4095 binary(69))"
- tdSql.execute(sql)
- tdSql.query("select * from regular_table_5")
- tdSql.checkCols(4096)
- # TD-5324
- sql = "alter table regular_table_5 modify column col4095 binary(70); "
- tdSql.error(sql)
-
- # drop and add
- sql = "alter table regular_table_5 drop column col4095; "
- tdSql.execute(sql)
- sql = "select * from regular_table_5; "
- tdSql.query(sql)
- tdSql.checkCols(4095)
- sql = "alter table regular_table_5 add column col4095 binary(70); "
- tdSql.error(sql)
- sql = "alter table regular_table_5 add column col4095 binary(69); "
- tdSql.execute(sql)
- sql = "select * from regular_table_5; "
- tdSql.query(sql)
- tdSql.checkCols(4096)
-
- #out TSDB_MAX_BYTES_PER_ROW 49151
- tdLog.info('test regular_table max bytes per row out 49151')
- sql = "create table regular_table_6(ts timestamp, "
- for i in range(500):
- sql += "int_%d int, " % (i + 1)
- for i in range(500,1000):
- sql += "smallint_%d smallint, " % (i + 1)
- for i in range(1000,1500):
- sql += "tinyint_%d tinyint, " % (i + 1)
- for i in range(1500,2000):
- sql += "double_%d double, " % (i + 1)
- for i in range(2000,2500):
- sql += "float_%d float, " % (i + 1)
- for i in range(2500,3000):
- sql += "bool_%d bool, " % (i + 1)
- for i in range(3000,3500):
- sql += "bigint_%d bigint, " % (i + 1)
- for i in range(3500,3800):
- sql += "nchar_%d nchar(20), " % (i + 1)
- for i in range(3800,4090):
- sql += "binary_%d binary(34), " % (i + 1)
- for i in range(4090,4094):
- sql += "timestamp_%d timestamp, " % (i + 1)
- sql += "col4095 binary(70))"
- tdLog.info(len(sql))
- tdSql.error(sql)
-
-
- print("==============step4, super table , 1 ts + 4090 cols + 4 tags ==============")
- startTime = time.time()
- sql = "create stable stable_1(ts timestamp, "
- for i in range(4090):
- sql += "col%d int, " % (i + 1)
- sql += "col4091 binary(22))"
- sql += " tags (loc nchar(10),tag_1 int,tag_2 int,tag_3 int) "
- tdLog.info(len(sql))
- tdSql.execute(sql)
- sql = '''create table table_0 using stable_1
- tags('table_0' , '1' , '2' , '3' );'''
- tdSql.execute(sql)
-
- for i in range(self.num):
- sql = "insert into table_0 values(%d, "
- for j in range(4090):
- str = "'%s', " % random.randint(0,1000)
- sql += str
- sql += "'%s')" % self.get_random_string(22)
- tdSql.execute(sql % (self.ts + i))
- time.sleep(1)
- tdSql.query("select count(*) from table_0")
- tdSql.checkData(0, 0, self.num)
- tdSql.query("select * from table_0")
- tdSql.checkRows(self.num)
- tdSql.checkCols(4092)
-
- sql = '''create table table_1 using stable_1
- tags('table_1' , '1' , '2' , '3' );'''
- tdSql.execute(sql)
-
- for i in range(self.num):
- sql = "insert into table_1 values(%d, "
- for j in range(2080):
- sql += "'%d', " % random.randint(0,1000)
- for j in range(2080,4080):
- sql += "'%s', " % 'NULL'
- for j in range(4080,4090):
- sql += "'%s', " % random.randint(0,10000)
- sql += "'%s')" % self.get_random_string(22)
- tdSql.execute(sql % (self.ts + i))
- time.sleep(1)
- tdSql.query("select count(*) from table_1")
- tdSql.checkData(0, 0, self.num)
- tdSql.query("select * from table_1")
- tdSql.checkRows(self.num)
- tdSql.checkCols(4092)
-
- endTime = time.time()
- print("total time %ds" % (endTime - startTime))
-
- #insert in order
- tdLog.info('test insert in order')
- for i in range(self.num):
- sql = "insert into table_1 (ts,col1,col2,col3,col4,col5,col6,col7,col8,col9,col10,col4091) values(%d, "
- for j in range(10):
- str = "'%s', " % random.randint(0,1000)
- sql += str
- sql += "'%s')" % self.get_random_string(22)
- tdSql.execute(sql % (self.ts + i + 1000))
- time.sleep(1)
- tdSql.query("select count(*) from table_1")
- tdSql.checkData(0, 0, 2*self.num)
- tdSql.query("select * from table_1")
- tdSql.checkRows(2*self.num)
- tdSql.checkCols(4092)
-
- #insert out of order
- tdLog.info('test insert out of order')
- for i in range(self.num):
- sql = "insert into table_1 (ts,col123,col2213,col331,col41,col523,col236,col71,col813,col912,col1320,col4091) values(%d, "
- for j in range(10):
- str = "'%s', " % random.randint(0,1000)
- sql += str
- sql += "'%s')" % self.get_random_string(22)
- tdSql.execute(sql % (self.ts + i + 2000))
- time.sleep(1)
- tdSql.query("select count(*) from table_1")
- tdSql.checkData(0, 0, 3*self.num)
- tdSql.query("select * from table_1")
- tdSql.checkRows(3*self.num)
- tdSql.checkCols(4092)
-
- print("==============step5,stable table , mix data type==============")
- sql = "create stable stable_3(ts timestamp, "
- for i in range(500):
- sql += "int_%d int, " % (i + 1)
- for i in range(500,1000):
- sql += "smallint_%d smallint, " % (i + 1)
- for i in range(1000,1500):
- sql += "tinyint_%d tinyint, " % (i + 1)
- for i in range(1500,2000):
- sql += "double_%d double, " % (i + 1)
- for i in range(2000,2500):
- sql += "float_%d float, " % (i + 1)
- for i in range(2500,3000):
- sql += "bool_%d bool, " % (i + 1)
- for i in range(3000,3500):
- sql += "bigint_%d bigint, " % (i + 1)
- for i in range(3500,3800):
- sql += "nchar_%d nchar(4), " % (i + 1)
- for i in range(3800,4090):
- sql += "binary_%d binary(10), " % (i + 1)
- sql += "col4091 binary(22))"
- sql += " tags (loc nchar(10),tag_1 int,tag_2 int,tag_3 int) "
- tdLog.info(len(sql))
- tdSql.execute(sql)
- sql = '''create table table_30 using stable_3
- tags('table_30' , '1' , '2' , '3' );'''
- tdSql.execute(sql)
-
- for i in range(self.num):
- sql = "insert into table_30 values(%d, "
- for j in range(500):
- str = "'%s', " % random.randint(-2147483647,2147483647)
- sql += str
- for j in range(500,1000):
- str = "'%s', " % random.randint(-32767,32767 )
- sql += str
- for j in range(1000,1500):
- str = "'%s', " % random.randint(-127,127)
- sql += str
- for j in range(1500,2000):
- str = "'%s', " % random.randint(-922337203685477580700,922337203685477580700)
- sql += str
- for j in range(2000,2500):
- str = "'%s', " % random.randint(-92233720368547758070,92233720368547758070)
- sql += str
- for j in range(2500,3000):
- str = "'%s', " % random.choice(['true','false'])
- sql += str
- for j in range(3000,3500):
- str = "'%s', " % random.randint(-9223372036854775807,9223372036854775807)
- sql += str
- for j in range(3500,3800):
- str = "'%s', " % self.get_random_string(4)
- sql += str
- for j in range(3800,4090):
- str = "'%s', " % self.get_random_string(10)
- sql += str
- sql += "'%s')" % self.get_random_string(22)
- tdSql.execute(sql % (self.ts + i))
- time.sleep(1)
- tdSql.query("select count(*) from table_30")
- tdSql.checkData(0, 0, self.num)
- tdSql.query("select * from table_30")
- tdSql.checkRows(self.num)
- tdSql.checkCols(4092)
-
- #insert null value
- tdLog.info('test insert null value')
- sql = '''create table table_31 using stable_3
- tags('table_31' , '1' , '2' , '3' );'''
- tdSql.execute(sql)
-
- for i in range(self.num):
- sql = "insert into table_31 values(%d, "
- for j in range(2500):
- str = "'%s', " % random.choice(['NULL' ,'NULL' ,'NULL' ,1 , 10 ,100 ,-100 ,-10, 88 ,66 ,'NULL' ,'NULL' ,'NULL' ])
- sql += str
- for j in range(2500,3000):
- str = "'%s', " % random.choice(['true' ,'false'])
- sql += str
- for j in range(3000,3500):
- str = "'%s', " % random.randint(-9223372036854775807,9223372036854775807)
- sql += str
- for j in range(3500,3800):
- str = "'%s', " % self.get_random_string(4)
- sql += str
- for j in range(3800,4090):
- str = "'%s', " % self.get_random_string(10)
- sql += str
- sql += "'%s')" % self.get_random_string(22)
- tdSql.execute(sql % (self.ts + i))
- time.sleep(1)
- tdSql.query("select count(*) from table_31")
- tdSql.checkData(0, 0, self.num)
- tdSql.query("select * from table_31")
- tdSql.checkRows(self.num)
- tdSql.checkCols(4092)
-
- #insert in order
- tdLog.info('test insert in order')
- for i in range(self.num):
- sql = "insert into table_31 (ts,int_2,int_22,int_169,smallint_537,smallint_607,tinyint_1030,tinyint_1491,double_1629,double_1808,float_2075,col4091) values(%d, "
- for j in range(10):
- str = "'%s', " % random.randint(0,100)
- sql += str
- sql += "'%s')" % self.get_random_string(22)
- tdSql.execute(sql % (self.ts + i + 1000))
- time.sleep(1)
- tdSql.query("select count(*) from table_31")
- tdSql.checkData(0, 0, 2*self.num)
- tdSql.query("select * from table_31")
- tdSql.checkRows(2*self.num)
- tdSql.checkCols(4092)
-
- #insert out of order
- tdLog.info('test insert out of order')
- for i in range(self.num):
- sql = "insert into table_31 (ts,int_169,float_2075,int_369,tinyint_1491,tinyint_1030,float_2360,smallint_537,double_1808,double_1608,double_1629,col4091) values(%d, "
- for j in range(10):
- str = "'%s', " % random.randint(0,100)
- sql += str
- sql += "'%s')" % self.get_random_string(22)
- tdSql.execute(sql % (self.ts + i + 2000))
- time.sleep(1)
- tdSql.query("select count(*) from table_31")
- tdSql.checkData(0, 0, 3*self.num)
- tdSql.query("select * from table_31")
- tdSql.checkRows(3*self.num)
- tdSql.checkCols(4092)
-
- #define TSDB_MAX_BYTES_PER_ROW 49151 TSDB_MAX_TAGS_LEN 16384
- #ts:8\int:4\smallint:2\bigint:8\bool:1\float:4\tinyint:1\nchar:4*()+2[offset]\binary:1*()+2[offset]
- tdLog.info('test super table max bytes per row 49151')
- sql = "create table stable_4(ts timestamp, "
- for i in range(500):
- sql += "int_%d int, " % (i + 1)
- for i in range(500,1000):
- sql += "smallint_%d smallint, " % (i + 1)
- for i in range(1000,1500):
- sql += "tinyint_%d tinyint, " % (i + 1)
- for i in range(1500,2000):
- sql += "double_%d double, " % (i + 1)
- for i in range(2000,2500):
- sql += "float_%d float, " % (i + 1)
- for i in range(2500,3000):
- sql += "bool_%d bool, " % (i + 1)
- for i in range(3000,3500):
- sql += "bigint_%d bigint, " % (i + 1)
- for i in range(3500,3800):
- sql += "nchar_%d nchar(20), " % (i + 1)
- for i in range(3800,4090):
- sql += "binary_%d binary(34), " % (i + 1)
- sql += "col4091 binary(101))"
- sql += " tags (loc nchar(10),tag_1 int,tag_2 int,tag_3 int) "
- tdSql.execute(sql)
- sql = '''create table table_40 using stable_4
- tags('table_40' , '1' , '2' , '3' );'''
- tdSql.execute(sql)
- tdSql.query("select * from table_40")
- tdSql.checkCols(4092)
- tdSql.query("describe table_40")
- tdSql.checkRows(4096)
-
- tdLog.info('test super table drop and add column or tag')
- sql = "alter stable stable_4 drop column col4091; "
- tdSql.execute(sql)
- sql = "select * from stable_4; "
- tdSql.query(sql)
- tdSql.checkCols(4095)
- sql = "alter table stable_4 add column col4091 binary(102); "
- tdSql.error(sql)
- sql = "alter table stable_4 add column col4091 binary(101); "
- tdSql.execute(sql)
- sql = "select * from stable_4; "
- tdSql.query(sql)
- tdSql.checkCols(4096)
-
- sql = "alter stable stable_4 drop tag tag_1; "
- tdSql.execute(sql)
- sql = "select * from stable_4; "
- tdSql.query(sql)
- tdSql.checkCols(4095)
- sql = "alter table stable_4 add tag tag_1 int; "
- tdSql.execute(sql)
- sql = "select * from stable_4; "
- tdSql.query(sql)
- tdSql.checkCols(4096)
- sql = "alter table stable_4 add tag loc1 nchar(10); "
- tdSql.error(sql)
-
- tdLog.info('test super table max bytes per row 49151')
- sql = "create table stable_5(ts timestamp, "
- for i in range(500):
- sql += "int_%d int, " % (i + 1)
- for i in range(500,1000):
- sql += "smallint_%d smallint, " % (i + 1)
- for i in range(1000,1500):
- sql += "tinyint_%d tinyint, " % (i + 1)
- for i in range(1500,2000):
- sql += "double_%d double, " % (i + 1)
- for i in range(2000,2500):
- sql += "float_%d float, " % (i + 1)
- for i in range(2500,3000):
- sql += "bool_%d bool, " % (i + 1)
- for i in range(3000,3500):
- sql += "bigint_%d bigint, " % (i + 1)
- for i in range(3500,3800):
- sql += "nchar_%d nchar(20), " % (i + 1)
- for i in range(3800,4090):
- sql += "binary_%d binary(34), " % (i + 1)
- sql += "col4091 binary(102))"
- sql += " tags (loc nchar(10),tag_1 int,tag_2 int,tag_3 int) "
- tdSql.error(sql)
-
- print("==============step6, super table error col ==============")
- tdLog.info('test exceeds row num')
- # column + tag > 4096
- sql = "create stable stable_2(ts timestamp, "
- for i in range(4091):
- sql += "col%d int, " % (i + 1)
- sql += "col4092 binary(22))"
- sql += " tags (loc nchar(10),tag_1 int,tag_2 int,tag_3 int) "
- tdLog.info(len(sql))
- tdSql.error(sql)
-
- # column + tag > 4096
- sql = "create stable stable_2(ts timestamp, "
- for i in range(4090):
- sql += "col%d int, " % (i + 1)
- sql += "col4091 binary(22))"
- sql += " tags (loc nchar(10),tag_1 int,tag_2 int,tag_3 int,tag_4 int) "
- tdLog.info(len(sql))
- tdSql.error(sql)
-
- # alter column + tag > 4096
- sql = "alter table stable_1 add column max int; "
- tdSql.error(sql)
- # TD-5322
- sql = "alter table stable_1 add tag max int; "
- tdSql.error(sql)
- # TD-5324
- sql = "alter table stable_4 modify column col4091 binary(102); "
- tdSql.error(sql)
- sql = "alter table stable_4 modify tag loc nchar(20); "
- tdSql.query("select * from table_40")
- tdSql.checkCols(4092)
- tdSql.query("describe table_40")
- tdSql.checkRows(4096)
-
-
-
-
- def stop(self):
- tdSql.close()
- tdLog.success("%s successfully executed" % __file__)
-
-
-tdCases.addWindows(__file__, TDTestCase())
-tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/system-test/5-taos-tools/taosbenchmark/TD-5213/insertSigcolumnsNum4096.csv b/tests/system-test/5-taos-tools/taosbenchmark/TD-5213/insertSigcolumnsNum4096.csv
deleted file mode 100755
index 5b30be5b4c4d5c323141097af6207ffb8bb93449..0000000000000000000000000000000000000000
--- a/tests/system-test/5-taos-tools/taosbenchmark/TD-5213/insertSigcolumnsNum4096.csv
+++ /dev/null
@@ -1,3 +0,0 @@
-1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 460, 461, 462, 463, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 474, 475, 476, 477, 478, 479, 480, 481, 482, 483, 484, 485, 486, 487, 488, 489, 490, 491, 492, 493, 494, 495, 496, 497, 498, 499, 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511, 512, 513, 514, 515, 516, 517, 518, 519, 520, 521, 522, 523, 524, 525, 526, 527, 528, 529, 530, 531, 532, 533, 534, 535, 536, 537, 538, 539, 540, 541, 542, 543, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553, 554, 555, 556, 557, 558, 559, 560, 561, 562, 563, 564, 565, 566, 567, 568, 569, 570, 571, 572, 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, 583, 584, 585, 586, 587, 588, 589, 590, 591, 592, 593, 594, 595, 596, 597, 598, 599, 600, 601, 602, 603, 604, 605, 606, 607, 608, 609, 610, 611, 612, 613, 614, 615, 616, 617, 618, 619, 620, 621, 622, 623, 624, 625, 626, 627, 628, 629, 630, 631, 632, 633, 634, 635, 636, 637, 638, 639, 640, 641, 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, 658, 659, 660, 661, 662, 663, 664, 665, 666, 667, 668, 669, 670, 671, 672, 673, 674, 675, 676, 677, 678, 679, 680, 681, 682, 683, 684, 685, 686, 687, 688, 689, 690, 691, 692, 693, 694, 695, 696, 697, 698, 699, 700, 701, 702, 703, 704, 705, 706, 707, 708, 709, 710, 711, 712, 713, 714, 715, 716, 717, 718, 719, 720, 721, 722, 723, 724, 725, 726, 727, 728, 729, 730, 731, 732, 
733, 734, 735, 736, 737, 738, 739, 740, 741, 742, 743, 744, 745, 746, 747, 748, 749, 750, 751, 752, 753, 754, 755, 756, 757, 758, 759, 760, 761, 762, 763, 764, 765, 766, 767, 768, 769, 770, 771, 772, 773, 774, 775, 776, 777, 778, 779, 780, 781, 782, 783, 784, 785, 786, 787, 788, 789, 790, 791, 792, 793, 794, 795, 796, 797, 798, 799, 800, 801, 802, 803, 804, 805, 806, 807, 808, 809, 810, 811, 812, 813, 814, 815, 816, 817, 818, 819, 820, 821, 822, 823, 824, 825, 826, 827, 828, 829, 830, 831, 832, 833, 834, 835, 836, 837, 838, 839, 840, 841, 842, 843, 844, 845, 846, 847, 848, 849, 850, 851, 852, 853, 854, 855, 856, 857, 858, 859, 860, 861, 862, 863, 864, 865, 866, 867, 868, 869, 870, 871, 872, 873, 874, 875, 876, 877, 878, 879, 880, 881, 882, 883, 884, 885, 886, 887, 888, 889, 890, 891, 892, 893, 894, 895, 896, 897, 898, 899, 900, 901, 902, 903, 904, 905, 906, 907, 908, 909, 910, 911, 912, 913, 914, 915, 916, 917, 918, 919, 920, 921, 922, 923, 924, 925, 926, 927, 928, 929, 930, 931, 932, 933, 934, 935, 936, 937, 938, 939, 940, 941, 942, 943, 944, 945, 946, 947, 948, 949, 950, 951, 952, 953, 954, 955, 956, 957, 958, 959, 960, 961, 962, 963, 964, 965, 966, 967, 968, 969, 970, 971, 972, 973, 974, 975, 976, 977, 978, 979, 980, 981, 982, 983, 984, 985, 986, 987, 988, 989, 990, 991, 992, 993, 994, 995, 996, 997, 998, 999, 1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015, 1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023, 1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031, 1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039, 1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055, 1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063, 1064, 1065, 1066, 1067, 1068, 1069, 1070, 1071, 1072, 1073, 1074, 1075, 1076, 1077, 1078, 1079, 1080, 1081, 1082, 1083, 1084, 1085, 1086, 1087, 1088, 1089, 1090, 1091, 1092, 1093, 1094, 1095, 1096, 1097, 1098, 1099, 1100, 1101, 1102, 1103, 1104, 1105, 1106, 1107, 1108, 1109, 1110, 1111, 1112, 1113, 1114, 1115, 1116, 1117, 1118, 1119, 1120, 1121, 1122, 1123, 1124, 1125, 1126, 1127, 1128, 1129, 1130, 1131, 1132, 1133, 1134, 1135, 1136, 1137, 1138, 1139, 1140, 1141, 1142, 1143, 1144, 1145, 1146, 1147, 1148, 1149, 1150, 1151, 1152, 1153, 1154, 1155, 1156, 1157, 1158, 1159, 1160, 1161, 1162, 1163, 1164, 1165, 1166, 1167, 1168, 1169, 1170, 1171, 1172, 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1180, 1181, 1182, 1183, 1184, 1185, 1186, 1187, 1188, 1189, 1190, 1191, 1192, 1193, 1194, 1195, 1196, 1197, 1198, 1199, 1200, 1201, 1202, 1203, 1204, 1205, 1206, 1207, 1208, 1209, 1210, 1211, 1212, 1213, 1214, 1215, 1216, 1217, 1218, 1219, 1220, 1221, 1222, 1223, 1224, 1225, 1226, 1227, 1228, 1229, 1230, 1231, 1232, 1233, 1234, 1235, 1236, 1237, 1238, 1239, 1240, 1241, 1242, 1243, 1244, 1245, 1246, 1247, 1248, 1249, 1250, 1251, 1252, 1253, 1254, 1255, 1256, 1257, 1258, 1259, 1260, 1261, 1262, 1263, 1264, 1265, 1266, 1267, 1268, 1269, 1270, 1271, 1272, 1273, 1274, 1275, 1276, 1277, 1278, 1279, 1280, 1281, 1282, 1283, 1284, 1285, 1286, 1287, 1288, 1289, 1290, 1291, 1292, 1293, 1294, 1295, 1296, 1297, 1298, 1299, 1300, 1301, 1302, 1303, 1304, 1305, 1306, 1307, 1308, 1309, 1310, 1311, 1312, 1313, 1314, 1315, 1316, 1317, 1318, 1319, 1320, 1321, 1322, 1323, 1324, 1325, 1326, 1327, 1328, 1329, 1330, 1331, 1332, 1333, 1334, 1335, 1336, 1337, 1338, 1339, 1340, 1341, 1342, 1343, 1344, 1345, 1346, 1347, 1348, 1349, 1350, 1351, 1352, 1353, 1354, 1355, 1356, 1357, 1358, 1359, 1360, 1361, 1362, 1363, 1364, 1365, 1366, 1367, 1368, 1369, 
1370, 1371, 1372, 1373, 1374, 1375, 1376, 1377, 1378, 1379, 1380, 1381, 1382, 1383, 1384, 1385, 1386, 1387, 1388, 1389, 1390, 1391, 1392, 1393, 1394, 1395, 1396, 1397, 1398, 1399, 1400, 1401, 1402, 1403, 1404, 1405, 1406, 1407, 1408, 1409, 1410, 1411, 1412, 1413, 1414, 1415, 1416, 1417, 1418, 1419, 1420, 1421, 1422, 1423, 1424, 1425, 1426, 1427, 1428, 1429, 1430, 1431, 1432, 1433, 1434, 1435, 1436, 1437, 1438, 1439, 1440, 1441, 1442, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1450, 1451, 1452, 1453, 1454, 1455, 1456, 1457, 1458, 1459, 1460, 1461, 1462, 1463, 1464, 1465, 1466, 1467, 1468, 1469, 1470, 1471, 1472, 1473, 1474, 1475, 1476, 1477, 1478, 1479, 1480, 1481, 1482, 1483, 1484, 1485, 1486, 1487, 1488, 1489, 1490, 1491, 1492, 1493, 1494, 1495, 1496, 1497, 1498, 1499, 1500, 1501, 1502, 1503, 1504, 1505, 1506, 1507, 1508, 1509, 1510, 1511, 1512, 1513, 1514, 1515, 1516, 1517, 1518, 1519, 1520, 1521, 1522, 1523, 1524, 1525, 1526, 1527, 1528, 1529, 1530, 1531, 1532, 1533, 1534, 1535, 1536, 1537, 1538, 1539, 1540, 1541, 1542, 1543, 1544, 1545, 1546, 1547, 1548, 1549, 1550, 1551, 1552, 1553, 1554, 1555, 1556, 1557, 1558, 1559, 1560, 1561, 1562, 1563, 1564, 1565, 1566, 1567, 1568, 1569, 1570, 1571, 1572, 1573, 1574, 1575, 1576, 1577, 1578, 1579, 1580, 1581, 1582, 1583, 1584, 1585, 1586, 1587, 1588, 1589, 1590, 1591, 1592, 1593, 1594, 1595, 1596, 1597, 1598, 1599, 1600, 1601, 1602, 1603, 1604, 1605, 1606, 1607, 1608, 1609, 1610, 1611, 1612, 1613, 1614, 1615, 1616, 1617, 1618, 1619, 1620, 1621, 1622, 1623, 1624, 1625, 1626, 1627, 1628, 1629, 1630, 1631, 1632, 1633, 1634, 1635, 1636, 1637, 1638, 1639, 1640, 1641, 1642, 1643, 1644, 1645, 1646, 1647, 1648, 1649, 1650, 1651, 1652, 1653, 1654, 1655, 1656, 1657, 1658, 1659, 1660, 1661, 1662, 1663, 1664, 1665, 1666, 1667, 1668, 1669, 1670, 1671, 1672, 1673, 1674, 1675, 1676, 1677, 1678, 1679, 1680, 1681, 1682, 1683, 1684, 1685, 1686, 1687, 1688, 1689, 1690, 1691, 1692, 1693, 1694, 1695, 1696, 1697, 1698, 1699, 1700, 1701, 1702, 1703, 1704, 1705, 1706, 1707, 1708, 1709, 1710, 1711, 1712, 1713, 1714, 1715, 1716, 1717, 1718, 1719, 1720, 1721, 1722, 1723, 1724, 1725, 1726, 1727, 1728, 1729, 1730, 1731, 1732, 1733, 1734, 1735, 1736, 1737, 1738, 1739, 1740, 1741, 1742, 1743, 1744, 1745, 1746, 1747, 1748, 1749, 1750, 1751, 1752, 1753, 1754, 1755, 1756, 1757, 1758, 1759, 1760, 1761, 1762, 1763, 1764, 1765, 1766, 1767, 1768, 1769, 1770, 1771, 1772, 1773, 1774, 1775, 1776, 1777, 1778, 1779, 1780, 1781, 1782, 1783, 1784, 1785, 1786, 1787, 1788, 1789, 1790, 1791, 1792, 1793, 1794, 1795, 1796, 1797, 1798, 1799, 1800, 1801, 1802, 1803, 1804, 1805, 1806, 1807, 1808, 1809, 1810, 1811, 1812, 1813, 1814, 1815, 1816, 1817, 1818, 1819, 1820, 1821, 1822, 1823, 1824, 1825, 1826, 1827, 1828, 1829, 1830, 1831, 1832, 1833, 1834, 1835, 1836, 1837, 1838, 1839, 1840, 1841, 1842, 1843, 1844, 1845, 1846, 1847, 1848, 1849, 1850, 1851, 1852, 1853, 1854, 1855, 1856, 1857, 1858, 1859, 1860, 1861, 1862, 1863, 1864, 1865, 1866, 1867, 1868, 1869, 1870, 1871, 1872, 1873, 1874, 1875, 1876, 1877, 1878, 1879, 1880, 1881, 1882, 1883, 1884, 1885, 1886, 1887, 1888, 1889, 1890, 1891, 1892, 1893, 1894, 1895, 1896, 1897, 1898, 1899, 1900, 1901, 1902, 1903, 1904, 1905, 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913, 1914, 1915, 1916, 1917, 1918, 1919, 1920, 1921, 1922, 1923, 1924, 1925, 1926, 1927, 1928, 1929, 1930, 1931, 1932, 1933, 1934, 1935, 1936, 1937, 1938, 1939, 1940, 1941, 1942, 1943, 1944, 1945, 1946, 1947, 1948, 1949, 1950, 1951, 1952, 1953, 1954, 1955, 1956, 1957, 1958, 1959, 1960, 1961, 
1962, 1963, 1964, 1965, 1966, 1967, 1968, 1969, 1970, 1971, 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979, 1980, 1981, 1982, 1983, 1984, 1985, 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022, 2023, 2024, 2025, 2026, 2027, 2028, 2029, 2030, 2031, 2032, 2033, 2034, 2035, 2036, 2037, 2038, 2039, 2040, 2041, 2042, 2043, 2044, 2045, 2046, 2047, 2048, 2049, 2050, 2051, 2052, 2053, 2054, 2055, 2056, 2057, 2058, 2059, 2060, 2061, 2062, 2063, 2064, 2065, 2066, 2067, 2068, 2069, 2070, 2071, 2072, 2073, 2074, 2075, 2076, 2077, 2078, 2079, 2080, 2081, 2082, 2083, 2084, 2085, 2086, 2087, 2088, 2089, 2090, 2091, 2092, 2093, 2094, 2095, 2096, 2097, 2098, 2099, 2100, 2101, 2102, 2103, 2104, 2105, 2106, 2107, 2108, 2109, 2110, 2111, 2112, 2113, 2114, 2115, 2116, 2117, 2118, 2119, 2120, 2121, 2122, 2123, 2124, 2125, 2126, 2127, 2128, 2129, 2130, 2131, 2132, 2133, 2134, 2135, 2136, 2137, 2138, 2139, 2140, 2141, 2142, 2143, 2144, 2145, 2146, 2147, 2148, 2149, 2150, 2151, 2152, 2153, 2154, 2155, 2156, 2157, 2158, 2159, 2160, 2161, 2162, 2163, 2164, 2165, 2166, 2167, 2168, 2169, 2170, 2171, 2172, 2173, 2174, 2175, 2176, 2177, 2178, 2179, 2180, 2181, 2182, 2183, 2184, 2185, 2186, 2187, 2188, 2189, 2190, 2191, 2192, 2193, 2194, 2195, 2196, 2197, 2198, 2199, 2200, 2201, 2202, 2203, 2204, 2205, 2206, 2207, 2208, 2209, 2210, 2211, 2212, 2213, 2214, 2215, 2216, 2217, 2218, 2219, 2220, 2221, 2222, 2223, 2224, 2225, 2226, 2227, 2228, 2229, 2230, 2231, 2232, 2233, 2234, 2235, 2236, 2237, 2238, 2239, 2240, 2241, 2242, 2243, 2244, 2245, 2246, 2247, 2248, 2249, 2250, 2251, 2252, 2253, 2254, 2255, 2256, 2257, 2258, 2259, 2260, 2261, 2262, 2263, 2264, 2265, 2266, 2267, 2268, 2269, 2270, 2271, 2272, 2273, 2274, 2275, 2276, 2277, 2278, 2279, 2280, 2281, 2282, 2283, 2284, 2285, 2286, 2287, 2288, 2289, 2290, 2291, 2292, 2293, 2294, 2295, 2296, 2297, 2298, 2299, 2300, 2301, 2302, 2303, 2304, 2305, 2306, 2307, 2308, 2309, 2310, 2311, 2312, 2313, 2314, 2315, 2316, 2317, 2318, 2319, 2320, 2321, 2322, 2323, 2324, 2325, 2326, 2327, 2328, 2329, 2330, 2331, 2332, 2333, 2334, 2335, 2336, 2337, 2338, 2339, 2340, 2341, 2342, 2343, 2344, 2345, 2346, 2347, 2348, 2349, 2350, 2351, 2352, 2353, 2354, 2355, 2356, 2357, 2358, 2359, 2360, 2361, 2362, 2363, 2364, 2365, 2366, 2367, 2368, 2369, 2370, 2371, 2372, 2373, 2374, 2375, 2376, 2377, 2378, 2379, 2380, 2381, 2382, 2383, 2384, 2385, 2386, 2387, 2388, 2389, 2390, 2391, 2392, 2393, 2394, 2395, 2396, 2397, 2398, 2399, 2400, 2401, 2402, 2403, 2404, 2405, 2406, 2407, 2408, 2409, 2410, 2411, 2412, 2413, 2414, 2415, 2416, 2417, 2418, 2419, 2420, 2421, 2422, 2423, 2424, 2425, 2426, 2427, 2428, 2429, 2430, 2431, 2432, 2433, 2434, 2435, 2436, 2437, 2438, 2439, 2440, 2441, 2442, 2443, 2444, 2445, 2446, 2447, 2448, 2449, 2450, 2451, 2452, 2453, 2454, 2455, 2456, 2457, 2458, 2459, 2460, 2461, 2462, 2463, 2464, 2465, 2466, 2467, 2468, 2469, 2470, 2471, 2472, 2473, 2474, 2475, 2476, 2477, 2478, 2479, 2480, 2481, 2482, 2483, 2484, 2485, 2486, 2487, 2488, 2489, 2490, 2491, 2492, 2493, 2494, 2495, 2496, 2497, 2498, 2499, 2500, 2501, 2502, 2503, 2504, 2505, 2506, 2507, 2508, 2509, 2510, 2511, 2512, 2513, 2514, 2515, 2516, 2517, 2518, 2519, 2520, 2521, 2522, 2523, 2524, 2525, 2526, 2527, 2528, 2529, 2530, 2531, 2532, 2533, 2534, 2535, 2536, 2537, 2538, 2539, 2540, 2541, 2542, 2543, 2544, 2545, 2546, 2547, 2548, 2549, 2550, 2551, 2552, 2553, 
2554, 2555, 2556, 2557, 2558, 2559, 2560, 2561, 2562, 2563, 2564, 2565, 2566, 2567, 2568, 2569, 2570, 2571, 2572, 2573, 2574, 2575, 2576, 2577, 2578, 2579, 2580, 2581, 2582, 2583, 2584, 2585, 2586, 2587, 2588, 2589, 2590, 2591, 2592, 2593, 2594, 2595, 2596, 2597, 2598, 2599, 2600, 2601, 2602, 2603, 2604, 2605, 2606, 2607, 2608, 2609, 2610, 2611, 2612, 2613, 2614, 2615, 2616, 2617, 2618, 2619, 2620, 2621, 2622, 2623, 2624, 2625, 2626, 2627, 2628, 2629, 2630, 2631, 2632, 2633, 2634, 2635, 2636, 2637, 2638, 2639, 2640, 2641, 2642, 2643, 2644, 2645, 2646, 2647, 2648, 2649, 2650, 2651, 2652, 2653, 2654, 2655, 2656, 2657, 2658, 2659, 2660, 2661, 2662, 2663, 2664, 2665, 2666, 2667, 2668, 2669, 2670, 2671, 2672, 2673, 2674, 2675, 2676, 2677, 2678, 2679, 2680, 2681, 2682, 2683, 2684, 2685, 2686, 2687, 2688, 2689, 2690, 2691, 2692, 2693, 2694, 2695, 2696, 2697, 2698, 2699, 2700, 2701, 2702, 2703, 2704, 2705, 2706, 2707, 2708, 2709, 2710, 2711, 2712, 2713, 2714, 2715, 2716, 2717, 2718, 2719, 2720, 2721, 2722, 2723, 2724, 2725, 2726, 2727, 2728, 2729, 2730, 2731, 2732, 2733, 2734, 2735, 2736, 2737, 2738, 2739, 2740, 2741, 2742, 2743, 2744, 2745, 2746, 2747, 2748, 2749, 2750, 2751, 2752, 2753, 2754, 2755, 2756, 2757, 2758, 2759, 2760, 2761, 2762, 2763, 2764, 2765, 2766, 2767, 2768, 2769, 2770, 2771, 2772, 2773, 2774, 2775, 2776, 2777, 2778, 2779, 2780, 2781, 2782, 2783, 2784, 2785, 2786, 2787, 2788, 2789, 2790, 2791, 2792, 2793, 2794, 2795, 2796, 2797, 2798, 2799, 2800, 2801, 2802, 2803, 2804, 2805, 2806, 2807, 2808, 2809, 2810, 2811, 2812, 2813, 2814, 2815, 2816, 2817, 2818, 2819, 2820, 2821, 2822, 2823, 2824, 2825, 2826, 2827, 2828, 2829, 2830, 2831, 2832, 2833, 2834, 2835, 2836, 2837, 2838, 2839, 2840, 2841, 2842, 2843, 2844, 2845, 2846, 2847, 2848, 2849, 2850, 2851, 2852, 2853, 2854, 2855, 2856, 2857, 2858, 2859, 2860, 2861, 2862, 2863, 2864, 2865, 2866, 2867, 2868, 2869, 2870, 2871, 2872, 2873, 2874, 2875, 2876, 2877, 2878, 2879, 2880, 2881, 2882, 2883, 2884, 2885, 2886, 2887, 2888, 2889, 2890, 2891, 2892, 2893, 2894, 2895, 2896, 2897, 2898, 2899, 2900, 2901, 2902, 2903, 2904, 2905, 2906, 2907, 2908, 2909, 2910, 2911, 2912, 2913, 2914, 2915, 2916, 2917, 2918, 2919, 2920, 2921, 2922, 2923, 2924, 2925, 2926, 2927, 2928, 2929, 2930, 2931, 2932, 2933, 2934, 2935, 2936, 2937, 2938, 2939, 2940, 2941, 2942, 2943, 2944, 2945, 2946, 2947, 2948, 2949, 2950, 2951, 2952, 2953, 2954, 2955, 2956, 2957, 2958, 2959, 2960, 2961, 2962, 2963, 2964, 2965, 2966, 2967, 2968, 2969, 2970, 2971, 2972, 2973, 2974, 2975, 2976, 2977, 2978, 2979, 2980, 2981, 2982, 2983, 2984, 2985, 2986, 2987, 2988, 2989, 2990, 2991, 2992, 2993, 2994, 2995, 2996, 2997, 2998, 2999, 3000, 3001, 3002, 3003, 3004, 3005, 3006, 3007, 3008, 3009, 3010, 3011, 3012, 3013, 3014, 3015, 3016, 3017, 3018, 3019, 3020, 3021, 3022, 3023, 3024, 3025, 3026, 3027, 3028, 3029, 3030, 3031, 3032, 3033, 3034, 3035, 3036, 3037, 3038, 3039, 3040, 3041, 3042, 3043, 3044, 3045, 3046, 3047, 3048, 3049, 3050, 3051, 3052, 3053, 3054, 3055, 3056, 3057, 3058, 3059, 3060, 3061, 3062, 3063, 3064, 3065, 3066, 3067, 3068, 3069, 3070, 3071, 3072, 3073, 3074, 3075, 3076, 3077, 3078, 3079, 3080, 3081, 3082, 3083, 3084, 3085, 3086, 3087, 3088, 3089, 3090, 3091, 3092, 3093, 3094, 3095, 3096, 3097, 3098, 3099, 3100, 3101, 3102, 3103, 3104, 3105, 3106, 3107, 3108, 3109, 3110, 3111, 3112, 3113, 3114, 3115, 3116, 3117, 3118, 3119, 3120, 3121, 3122, 3123, 3124, 3125, 3126, 3127, 3128, 3129, 3130, 3131, 3132, 3133, 3134, 3135, 3136, 3137, 3138, 3139, 3140, 3141, 3142, 3143, 3144, 3145, 
3146, 3147, 3148, 3149, 3150, 3151, 3152, 3153, 3154, 3155, 3156, 3157, 3158, 3159, 3160, 3161, 3162, 3163, 3164, 3165, 3166, 3167, 3168, 3169, 3170, 3171, 3172, 3173, 3174, 3175, 3176, 3177, 3178, 3179, 3180, 3181, 3182, 3183, 3184, 3185, 3186, 3187, 3188, 3189, 3190, 3191, 3192, 3193, 3194, 3195, 3196, 3197, 3198, 3199, 3200, 3201, 3202, 3203, 3204, 3205, 3206, 3207, 3208, 3209, 3210, 3211, 3212, 3213, 3214, 3215, 3216, 3217, 3218, 3219, 3220, 3221, 3222, 3223, 3224, 3225, 3226, 3227, 3228, 3229, 3230, 3231, 3232, 3233, 3234, 3235, 3236, 3237, 3238, 3239, 3240, 3241, 3242, 3243, 3244, 3245, 3246, 3247, 3248, 3249, 3250, 3251, 3252, 3253, 3254, 3255, 3256, 3257, 3258, 3259, 3260, 3261, 3262, 3263, 3264, 3265, 3266, 3267, 3268, 3269, 3270, 3271, 3272, 3273, 3274, 3275, 3276, 3277, 3278, 3279, 3280, 3281, 3282, 3283, 3284, 3285, 3286, 3287, 3288, 3289, 3290, 3291, 3292, 3293, 3294, 3295, 3296, 3297, 3298, 3299, 3300, 3301, 3302, 3303, 3304, 3305, 3306, 3307, 3308, 3309, 3310, 3311, 3312, 3313, 3314, 3315, 3316, 3317, 3318, 3319, 3320, 3321, 3322, 3323, 3324, 3325, 3326, 3327, 3328, 3329, 3330, 3331, 3332, 3333, 3334, 3335, 3336, 3337, 3338, 3339, 3340, 3341, 3342, 3343, 3344, 3345, 3346, 3347, 3348, 3349, 3350, 3351, 3352, 3353, 3354, 3355, 3356, 3357, 3358, 3359, 3360, 3361, 3362, 3363, 3364, 3365, 3366, 3367, 3368, 3369, 3370, 3371, 3372, 3373, 3374, 3375, 3376, 3377, 3378, 3379, 3380, 3381, 3382, 3383, 3384, 3385, 3386, 3387, 3388, 3389, 3390, 3391, 3392, 3393, 3394, 3395, 3396, 3397, 3398, 3399, 3400, 3401, 3402, 3403, 3404, 3405, 3406, 3407, 3408, 3409, 3410, 3411, 3412, 3413, 3414, 3415, 3416, 3417, 3418, 3419, 3420, 3421, 3422, 3423, 3424, 3425, 3426, 3427, 3428, 3429, 3430, 3431, 3432, 3433, 3434, 3435, 3436, 3437, 3438, 3439, 3440, 3441, 3442, 3443, 3444, 3445, 3446, 3447, 3448, 3449, 3450, 3451, 3452, 3453, 3454, 3455, 3456, 3457, 3458, 3459, 3460, 3461, 3462, 3463, 3464, 3465, 3466, 3467, 3468, 3469, 3470, 3471, 3472, 3473, 3474, 3475, 3476, 3477, 3478, 3479, 3480, 3481, 3482, 3483, 3484, 3485, 3486, 3487, 3488, 3489, 3490, 3491, 3492, 3493, 3494, 3495, 3496, 3497, 3498, 3499, 3500, 3501, 3502, 3503, 3504, 3505, 3506, 3507, 3508, 3509, 3510, 3511, 3512, 3513, 3514, 3515, 3516, 3517, 3518, 3519, 3520, 3521, 3522, 3523, 3524, 3525, 3526, 3527, 3528, 3529, 3530, 3531, 3532, 3533, 3534, 3535, 3536, 3537, 3538, 3539, 3540, 3541, 3542, 3543, 3544, 3545, 3546, 3547, 3548, 3549, 3550, 3551, 3552, 3553, 3554, 3555, 3556, 3557, 3558, 3559, 3560, 3561, 3562, 3563, 3564, 3565, 3566, 3567, 3568, 3569, 3570, 3571, 3572, 3573, 3574, 3575, 3576, 3577, 3578, 3579, 3580, 3581, 3582, 3583, 3584, 3585, 3586, 3587, 3588, 3589, 3590, 3591, 3592, 3593, 3594, 3595, 3596, 3597, 3598, 3599, 3600, 3601, 3602, 3603, 3604, 3605, 3606, 3607, 3608, 3609, 3610, 3611, 3612, 3613, 3614, 3615, 3616, 3617, 3618, 3619, 3620, 3621, 3622, 3623, 3624, 3625, 3626, 3627, 3628, 3629, 3630, 3631, 3632, 3633, 3634, 3635, 3636, 3637, 3638, 3639, 3640, 3641, 3642, 3643, 3644, 3645, 3646, 3647, 3648, 3649, 3650, 3651, 3652, 3653, 3654, 3655, 3656, 3657, 3658, 3659, 3660, 3661, 3662, 3663, 3664, 3665, 3666, 3667, 3668, 3669, 3670, 3671, 3672, 3673, 3674, 3675, 3676, 3677, 3678, 3679, 3680, 3681, 3682, 3683, 3684, 3685, 3686, 3687, 3688, 3689, 3690, 3691, 3692, 3693, 3694, 3695, 3696, 3697, 3698, 3699, 3700, 3701, 3702, 3703, 3704, 3705, 3706, 3707, 3708, 3709, 3710, 3711, 3712, 3713, 3714, 3715, 3716, 3717, 3718, 3719, 3720, 3721, 3722, 3723, 3724, 3725, 3726, 3727, 3728, 3729, 3730, 3731, 3732, 3733, 3734, 3735, 3736, 3737, 
3738, 3739, 3740, 3741, 3742, 3743, 3744, 3745, 3746, 3747, 3748, 3749, 3750, 3751, 3752, 3753, 3754, 3755, 3756, 3757, 3758, 3759, 3760, 3761, 3762, 3763, 3764, 3765, 3766, 3767, 3768, 3769, 3770, 3771, 3772, 3773, 3774, 3775, 3776, 3777, 3778, 3779, 3780, 3781, 3782, 3783, 3784, 3785, 3786, 3787, 3788, 3789, 3790, 3791, 3792, 3793, 3794, 3795, 3796, 3797, 3798, 3799, 3800, 3801, 3802, 3803, 3804, 3805, 3806, 3807, 3808, 3809, 3810, 3811, 3812, 3813, 3814, 3815, 3816, 3817, 3818, 3819, 3820, 3821, 3822, 3823, 3824, 3825, 3826, 3827, 3828, 3829, 3830, 3831, 3832, 3833, 3834, 3835, 3836, 3837, 3838, 3839, 3840, 3841, 3842, 3843, 3844, 3845, 3846, 3847, 3848, 3849, 3850, 3851, 3852, 3853, 3854, 3855, 3856, 3857, 3858, 3859, 3860, 3861, 3862, 3863, 3864, 3865, 3866, 3867, 3868, 3869, 3870, 3871, 3872, 3873, 3874, 3875, 3876, 3877, 3878, 3879, 3880, 3881, 3882, 3883, 3884, 3885, 3886, 3887, 3888, 3889, 3890, 3891, 3892, 3893, 3894, 3895, 3896, 3897, 3898, 3899, 3900, 3901, 3902, 3903, 3904, 3905, 3906, 3907, 3908, 3909, 3910, 3911, 3912, 3913, 3914, 3915, 3916, 3917, 3918, 3919, 3920, 3921, 3922, 3923, 3924, 3925, 3926, 3927, 3928, 3929, 3930, 3931, 3932, 3933, 3934, 3935, 3936, 3937, 3938, 3939, 3940, 3941, 3942, 3943, 3944, 3945, 3946, 3947, 3948, 3949, 3950, 3951, 3952, 3953, 3954, 3955, 3956, 3957, 3958, 3959, 3960, 3961, 3962, 3963, 3964, 3965, 3966, 3967, 3968, 3969, 3970, 3971, 3972, 3973, 3974, 3975, 3976, 3977, 3978, 3979, 3980, 3981, 3982, 3983, 3984, 3985, 3986, 3987, 3988, 3989, 3990, 3991, 3992, 3993, 3994, 3995, 3996, 3997, 3998, 3999, 4000, 4001, 4002, 4003, 4004, 4005, 4006, 4007, 4008, 4009, 4010, 4011, 4012, 4013, 4014, 4015, 4016, 4017, 4018, 4019, 4020, 4021, 4022, 4023, 4024, 4025, 4026, 4027, 4028, 4029, 4030, 4031, 4032, 4033, 4034, 4035, 4036, 4037, 4038, 4039, 4040, 4041, 4042, 4043, 4044, 4045, 4046, 4047, 4048, 4049, 4050, 4051, 4052, 4053, 4054, 4055, 4056, 4057, 4058, 4059, 4060, 4061, 4062, 4063, 4064, 4065, 4066, 4067, 4068, 4069, 4070, 4071, 4072, 4073, 4074, 4075, 4076, 4077, 4078, 4079, 4080, 4081, 4082, 4083, 4084, 4085, 4086, 4087, 4088, 4089, 4090, 4091
-1,2,3,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NUL
L,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NUL
L,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NUL
L,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NUL
L,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NUL
L,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL
-1,2,3,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NUL
L,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NUL
L,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NUL
L,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NUL
L,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NUL
L,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,4,NULL,NULL,NULL,NULL,NULL,NULL,5,NULL,NULL,6,NULL,NULL,NULL,7,NULL,NULL,NULL,8,NULL,NULL,NULL,9,NULL,NULL,10
\ No newline at end of file
diff --git a/tests/system-test/5-taos-tools/taosbenchmark/TD-5213/insertSigcolumnsNum4096.json b/tests/system-test/5-taos-tools/taosbenchmark/TD-5213/insertSigcolumnsNum4096.json
deleted file mode 100755
index d7225dfd129c76db181cfc93789ac0f7a535d0fa..0000000000000000000000000000000000000000
--- a/tests/system-test/5-taos-tools/taosbenchmark/TD-5213/insertSigcolumnsNum4096.json
+++ /dev/null
@@ -1,137 +0,0 @@
-{
- "filetype": "insert",
- "cfgdir": "/etc/taos",
- "host": "127.0.0.1",
- "port": 6030,
- "user": "root",
- "password": "taosdata",
- "thread_count": 10,
- "thread_count_create_tbl": 10,
- "result_file": "./insert_res.txt",
- "confirm_parameter_prompt": "no",
- "insert_interval": 0,
- "interlace_rows": 10,
- "num_of_records_per_req": 1,
- "max_sql_len": 102400000,
- "databases": [{
- "dbinfo": {
- "name": "json_test",
- "drop": "yes",
- "replica": 1,
- "days": 10,
- "cache": 50,
- "blocks": 8,
- "precision": "ms",
- "keep": 36500,
- "minRows": 100,
- "maxRows": 4096,
- "comp":2,
- "walLevel":1,
- "cachelast":0,
- "quorum":1,
- "fsync":3000,
- "update": 0
- },
- "super_tables": [{
- "name": "stb_old",
- "child_table_exists":"no",
- "childtable_count": 2,
- "childtable_prefix": "stb_old_",
- "auto_create_table": "no",
- "batch_create_tbl_num": 5,
- "data_source": "rand",
- "insert_mode": "taosc",
- "insert_rows": 2,
- "childtable_limit": 0,
- "childtable_offset":0,
- "multi_thread_write_one_tbl": "no",
- "interlace_rows": 0,
- "insert_interval":0,
- "max_sql_len": 1024000,
- "disorder_ratio": 0,
- "disorder_range": 1000,
- "timestamp_step": 1,
- "start_timestamp": "2020-10-01 00:00:00.000",
- "sample_format": "csv",
- "sample_file": "./5-taos-tools/taosbenchmark/TD-5213/insertSigcolumnsNum4096.csv",
- "tags_file": "",
- "columns": [{"type": "INT","count":1000}, {"type": "BINARY", "len": 16, "count":20}],
- "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
- },{
- "name": "stb_new",
- "child_table_exists":"no",
- "childtable_count": 2,
- "childtable_prefix": "stb_new_",
- "auto_create_table": "no",
- "batch_create_tbl_num": 5,
- "data_source": "rand",
- "insert_mode": "taosc",
- "insert_rows": 2,
- "childtable_limit": 0,
- "childtable_offset":0,
- "multi_thread_write_one_tbl": "no",
- "interlace_rows": 0,
- "insert_interval":0,
- "max_sql_len": 1024000,
- "disorder_ratio": 0,
- "disorder_range": 1000,
- "timestamp_step": 1,
- "start_timestamp": "2020-10-01 00:00:00.000",
- "sample_format": "csv",
- "sample_file": "./5-taos-tools/taosbenchmark/sample.csv",
- "tags_file": "",
- "columns": [{"type": "INT","count":4000}, {"type": "BINARY", "len": 16, "count":90}],
- "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":3}]
- },{
- "name": "stb_mix",
- "child_table_exists":"no",
- "childtable_count": 2,
- "childtable_prefix": "stb_mix_",
- "auto_create_table": "no",
- "batch_create_tbl_num": 5,
- "data_source": "rand",
- "insert_mode": "taosc",
- "insert_rows": 2,
- "childtable_limit": 0,
- "childtable_offset":0,
- "multi_thread_write_one_tbl": "no",
- "interlace_rows": 0,
- "insert_interval":0,
- "max_sql_len": 1024000,
- "disorder_ratio": 0,
- "disorder_range": 1000,
- "timestamp_step": 1,
- "start_timestamp": "2020-10-01 00:00:00.000",
- "sample_format": "csv",
- "sample_file": "./5-taos-tools/taosbenchmark/sample.csv",
- "tags_file": "",
- "columns": [{"type": "INT","count":500},{"type": "SMALLINT","count":500},{"type": "TINYINT","count":500},{"type": "DOUBLE","count":500},{"type": "FLOAT","count":500},{"type": "BOOL","count":500},{"type": "BIGINT","count":500},{"type": "NCHAR","len": 20,"count":300},{"type": "BINARY","len": 34,"count":290},{"type": "BINARY","len": 101,"count":1}],
- "tags": [{"type": "INT", "count":3}, {"type": "NCHAR", "len": 10, "count":1}]
- },{
- "name": "stb_excel",
- "child_table_exists":"no",
- "childtable_count": 2,
- "childtable_prefix": "stb_excel_",
- "auto_create_table": "no",
- "batch_create_tbl_num": 5,
- "data_source": "sample",
- "insert_mode": "taosc",
- "insert_rows": 2,
- "childtable_limit": 0,
- "childtable_offset":0,
- "multi_thread_write_one_tbl": "no",
- "interlace_rows": 0,
- "insert_interval":0,
- "max_sql_len": 1024000,
- "disorder_ratio": 0,
- "disorder_range": 1000,
- "timestamp_step": 1,
- "start_timestamp": "2020-10-01 00:00:00.000",
- "sample_format": "csv",
- "sample_file": "./5-taos-tools/taosbenchmark/TD-5213/insertSigcolumnsNum4096.csv",
- "tags_file": "",
- "columns": [{"type": "INT","count":500},{"type": "SMALLINT","count":500},{"type": "SMALLINT","count":500},{"type": "DOUBLE","count":500},{"type": "FLOAT","count":500},{"type": "BOOL","count":500},{"type": "BIGINT","count":500},{"type": "NCHAR","len": 19,"count":300},{"type": "BINARY","len": 34,"count":290},{"type": "BINARY","len": 101,"count":1}],
- "tags": [{"type": "INT", "count":3}, {"type": "NCHAR", "len": 10, "count":1}]
- }]
- }]
-}
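
The larger super tables in the deleted config above are sized right at the 4096-column ceiling that the TD-5213 case targets: one timestamp column, the data columns listed under "columns", and, when querying the super table itself, its tag columns. A quick sanity check of that arithmetic as a standalone C sketch, using the counts from the JSON (the program is illustrative only, not part of the patch):

#include <stdio.h>

/* Column totals implied by insertSigcolumnsNum4096.json:
 * 1 timestamp column + the data columns under "columns";
 * a query on the super table additionally shows the tag columns. */
int main(void) {
    int ts = 1;
    int stb_new_cols = 4000 + 90;               /* INT x4000 + BINARY x90 */
    int stb_new_tags = 2 + 3;                   /* TINYINT x2 + BINARY x3 */
    int stb_mix_cols = 500 * 7 + 300 + 290 + 1; /* numeric/bool + NCHAR + BINARY */
    int stb_mix_tags = 3 + 1;                   /* INT x3 + NCHAR x1 */
    printf("stb_new child table: %d\n", ts + stb_new_cols);                /* 4091 */
    printf("stb_new super table: %d\n", ts + stb_new_cols + stb_new_tags); /* 4096 */
    printf("stb_mix child table: %d\n", ts + stb_mix_cols);                /* 4092 */
    printf("stb_mix super table: %d\n", ts + stb_mix_cols + stb_mix_tags); /* 4096 */
    return 0;
}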
diff --git a/tests/system-test/5-taos-tools/taosbenchmark/TD-5213/insertSigcolumnsNum4096.py b/tests/system-test/5-taos-tools/taosbenchmark/TD-5213/insertSigcolumnsNum4096.py
deleted file mode 100755
index 56b51f5498aed0a540a86bf03625266ad3599b58..0000000000000000000000000000000000000000
--- a/tests/system-test/5-taos-tools/taosbenchmark/TD-5213/insertSigcolumnsNum4096.py
+++ /dev/null
@@ -1,176 +0,0 @@
-###################################################################
-# Copyright (c) 2016 by TAOS Technologies, Inc.
-# All rights reserved.
-#
-# This file is proprietary and confidential to TAOS Technologies.
-# No part of this file may be reproduced, stored, transmitted,
-# disclosed or used in any form or by any means other than as
-# expressly provided by the written permission from Jianhui Tao
-#
-###################################################################
-
-# -*- coding: utf-8 -*-
-
-import sys
-import os
-import time
-from util.log import *
-from util.cases import *
-from util.sql import *
-from util.dnodes import *
-
-
-class TDTestCase:
- def init(self, conn, logSql):
- tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor(), logSql)
-
- def getBuildPath(self):
- selfPath = os.path.dirname(os.path.realpath(__file__))
-
- if ("community" in selfPath):
- projPath = selfPath[:selfPath.find("community")]
- else:
- projPath = selfPath[:selfPath.find("tests")]
-
- for root, dirs, files in os.walk(projPath):
- if ("taosd" in files):
- rootRealPath = os.path.dirname(os.path.realpath(root))
- if ("packaging" not in rootRealPath):
- buildPath = root[:len(root)-len("/build/bin")]
- break
- return buildPath
-
- def run(self):
- buildPath = self.getBuildPath()
- if (buildPath == ""):
- tdLog.exit("taosd not found!")
- else:
- tdLog.info("taosd found in %s" % buildPath)
- binPath = buildPath+ "/build/bin/"
-
- #-N:regular table -d:database name -t:table num -n:rows num per table -l:col num -y:force
- #regular old && new
- startTime = time.time()
- os.system("%staosBenchmark -N -d regular_old -t 1 -n 10 -l 1023 -y" % binPath)
- tdSql.execute("use regular_old")
- tdSql.query("show tables;")
- tdSql.checkRows(1)
- tdSql.query("select * from d0;")
- tdSql.checkCols(1024)
- tdSql.query("describe d0;")
- tdSql.checkRows(1024)
-
- os.system("%staosBenchmark -N -d regular_new -t 1 -n 10 -l 4095 -y" % binPath)
- tdSql.execute("use regular_new")
- tdSql.query("show tables;")
- tdSql.checkRows(1)
- tdSql.query("select * from d0;")
- tdSql.checkCols(4096)
- tdSql.query("describe d0;")
- tdSql.checkRows(4096)
-
- #super table -d:database name -t:table num -n:rows num per table -l:col num -y:force
- os.system("%staosBenchmark -d super_old -t 1 -n 10 -l 1021 -y" % binPath)
- tdSql.execute("use super_old")
- tdSql.query("show tables;")
- tdSql.checkRows(1)
- tdSql.query("select * from meters;")
- tdSql.checkCols(1024)
- tdSql.query("select * from d0;")
- tdSql.checkCols(1022)
- tdSql.query("describe meters;")
- tdSql.checkRows(1024)
- tdSql.query("describe d0;")
- tdSql.checkRows(1024)
-
- os.system("%staosBenchmark -d super_new -t 1 -n 10 -l 4093 -y" % binPath)
- tdSql.execute("use super_new")
- tdSql.query("show tables;")
- tdSql.checkRows(1)
- tdSql.query("select * from meters;")
- tdSql.checkCols(4096)
- tdSql.query("select * from d0;")
- tdSql.checkCols(4094)
- tdSql.query("describe meters;")
- tdSql.checkRows(4096)
- tdSql.query("describe d0;")
- tdSql.checkRows(4096)
- tdSql.execute("create table stb_new1_1 using meters tags(1,2)")
- tdSql.query("select * from stb_new1_1")
- tdSql.checkCols(4094)
- tdSql.query("describe stb_new1_1;")
- tdSql.checkRows(4096)
-
- # insert: create one or mutiple tables per sql and insert multiple rows per sql
- # test case for https://jira.taosdata.com:18080/browse/TD-5213
- os.system("%staosBenchmark -f tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json -y " % binPath)
- tdSql.execute("use json_test")
- tdSql.query("select count (tbname) from stb_old")
- tdSql.checkData(0, 0, 1)
-
- tdSql.query("select * from stb_old")
- tdSql.checkRows(10)
- tdSql.checkCols(1024)
-
- tdSql.query("select count (tbname) from stb_new")
- tdSql.checkData(0, 0, 1)
-
- tdSql.query("select * from stb_new")
- tdSql.checkRows(10)
- tdSql.checkCols(4096)
- tdSql.query("describe stb_new;")
- tdSql.checkRows(4096)
- tdSql.query("select * from stb_new_0")
- tdSql.checkRows(10)
- tdSql.checkCols(4091)
- tdSql.query("describe stb_new_0;")
- tdSql.checkRows(4096)
- tdSql.execute("create table stb_new1_1 using stb_new tags(1,2,3,4,5)")
- tdSql.query("select * from stb_new1_1")
- tdSql.checkCols(4091)
- tdSql.query("describe stb_new1_1;")
- tdSql.checkRows(4096)
-
- tdSql.query("select count (tbname) from stb_mix")
- tdSql.checkData(0, 0, 1)
-
- tdSql.query("select * from stb_mix")
- tdSql.checkRows(10)
- tdSql.checkCols(4096)
- tdSql.query("describe stb_mix;")
- tdSql.checkRows(4096)
- tdSql.query("select * from stb_mix_0")
- tdSql.checkRows(10)
- tdSql.checkCols(4092)
- tdSql.query("describe stb_mix_0;")
- tdSql.checkRows(4096)
-
- tdSql.query("select count (tbname) from stb_excel")
- tdSql.checkData(0, 0, 1)
-
- tdSql.query("select * from stb_excel")
- tdSql.checkRows(10)
- tdSql.checkCols(4096)
- tdSql.query("describe stb_excel;")
- tdSql.checkRows(4096)
- tdSql.query("select * from stb_excel_0")
- tdSql.checkRows(10)
- tdSql.checkCols(4092)
- tdSql.query("describe stb_excel_0;")
- tdSql.checkRows(4096)
- endTime = time.time()
- print("total time %ds" % (endTime - startTime))
-
-
- os.system("rm -rf tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py.sql")
-
-
-
- def stop(self):
- tdSql.close()
- tdLog.success("%s successfully executed" % __file__)
-
-
-tdCases.addWindows(__file__, TDTestCase())
-tdCases.addLinux(__file__, TDTestCase())
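
The deleted test drives taosBenchmark from the CLI, where -l sets the number of data columns and -N creates regular (non-super) tables, and then asserts column counts through select * and describe. The accounting behind those assertions, as an illustrative sketch (the two extra columns on each super table are taken to be its tag columns, which matches the 4094-vs-4096 counts checked above):

#include <stdio.h>

/* -l is the number of data columns taosBenchmark creates; every table also
 * gets one timestamp column, and the generated super table carries two tags,
 * which is what makes "meters" two columns wider than its child table d0. */
int main(void) {
    int l = 4093;                               /* taosBenchmark -l 4093 */
    printf("child table d0 : %d\n", 1 + l);     /* ts + data       = 4094 */
    printf("super table    : %d\n", 1 + l + 2); /* + 2 tag columns = 4096 */
    return 0;
}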
diff --git a/tests/tsim/src/simExe.c b/tests/tsim/src/simExe.c
index e880f1e44690c117e7099cecf9e7f452003f441d..d1288213c5baa823d4ebd78e2b564de520a44aa6 100644
--- a/tests/tsim/src/simExe.c
+++ b/tests/tsim/src/simExe.c
@@ -143,13 +143,13 @@ char *simGetVariable(SScript *script, char *varName, int32_t varLen) {
return var->varValue;
}
-int32_t simExecuteExpression(SScript *script, char *exp) {
+int32_t simExecuteExpression(SScript *script, char *expr) {
char * op1, *op2, *var1, *var2, *var3, *rest;
int32_t op1Len, op2Len, var1Len, var2Len, var3Len, val0, val1;
char t0[1024], t1[1024], t2[1024], t3[2048];
int32_t result;
- rest = paGetToken(exp, &var1, &var1Len);
+ rest = paGetToken(expr, &var1, &var1Len);
rest = paGetToken(rest, &op1, &op1Len);
rest = paGetToken(rest, &var2, &var2Len);
rest = paGetToken(rest, &op2, &op2Len);
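
The exp -> expr rename here, like the index -> idx rename in simSystem.c further down, most likely avoids reusing C library identifiers: exp() is declared in <math.h> and index() in <strings.h>, so parameters or locals with those names can draw shadowing warnings on stricter builds. A minimal sketch of the renamed form (the helper is hypothetical, not from the patch):

#include <math.h>
#include <stdio.h>

/* With <math.h> in scope, a parameter named "exp" shadows the library
 * function exp() and -Wshadow-style diagnostics may flag it; naming it
 * "expr" sidesteps the collision entirely. */
static size_t expr_len(const char *expr) {
    size_t n = 0;
    while (expr[n] != '\0') n++;
    return n;
}

int main(void) {
    printf("%zu\n", expr_len("$x == 1")); /* 7 */
    printf("%f\n", exp(1.0));             /* the libm exp() remains usable */
    return 0;
}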
diff --git a/tests/tsim/src/simParse.c b/tests/tsim/src/simParse.c
index 1acdcd2ac6eb0ecb66e2977dee7577393ed242ef..7de263000645cc3b5078ab96415620973149788a 100644
--- a/tests/tsim/src/simParse.c
+++ b/tests/tsim/src/simParse.c
@@ -251,11 +251,11 @@ SScript *simParseScript(char *fileName) {
return script;
}
-int32_t simCheckExpression(char *exp) {
+int32_t simCheckExpression(char *expr) {
char * op1, *op2, *op, *rest;
int32_t op1Len, op2Len, opLen;
- rest = paGetToken(exp, &op1, &op1Len);
+ rest = paGetToken(expr, &op1, &op1Len);
if (op1Len == 0) {
sprintf(parseErr, "expression is required");
return -1;
@@ -295,10 +295,10 @@ int32_t simCheckExpression(char *exp) {
rest = paGetToken(rest, &op, &opLen);
- if (opLen == 0) return (int32_t)(rest - exp);
+ if (opLen == 0) return (int32_t)(rest - expr);
/* if it is key word "then" */
- if (strncmp(op, "then", 4) == 0) return (int32_t)(op - exp);
+ if (strncmp(op, "then", 4) == 0) return (int32_t)(op - expr);
rest = paGetToken(rest, &op2, &op2Len);
if (op2Len == 0) {
@@ -312,7 +312,7 @@ int32_t simCheckExpression(char *exp) {
}
if (op[0] == '+' || op[0] == '-' || op[0] == '*' || op[0] == '/' || op[0] == '.') {
- return (int32_t)(rest - exp);
+ return (int32_t)(rest - expr);
}
return -1;
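
simCheckExpression reports how far it scanned by pointer subtraction: paGetToken keeps advancing rest past each token, so rest - expr (or op - expr when the scan stops at the keyword "then") is a byte offset from the start of the expression. A small standalone sketch of that idiom, with strstr standing in for the token walk:

#include <stdio.h>
#include <string.h>

int main(void) {
    char expr[] = "$x == 1 then";
    /* strstr is only a stand-in; in the patch the cursor is advanced token
     * by token via paGetToken, and the consumed length is the difference
     * between the advanced pointer and the start of expr. */
    char *stop = strstr(expr, "then");
    printf("consumed %d bytes before 'then'\n", (int)(stop - expr)); /* 8 */
    return 0;
}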
diff --git a/tests/tsim/src/simSystem.c b/tests/tsim/src/simSystem.c
index 0879e371ef62fee81786728e2b980442567fbaa1..7569d3fc7ddf0708d093b92bb9896277d7134416 100644
--- a/tests/tsim/src/simSystem.c
+++ b/tests/tsim/src/simSystem.c
@@ -43,9 +43,9 @@ char *simParseArbitratorName(char *varName) {
char *simParseHostName(char *varName) {
static char hostName[140];
- int32_t index = atoi(varName + 8);
+ int32_t idx = atoi(varName + 8);
int32_t port = 7100;
- switch (index) {
+ switch (idx) {
case 1:
port = 7100;
break;