diff --git a/Jenkinsfile2 b/Jenkinsfile2
index 1920d8da173cc0cf24d70137e1f6f3e1ac35ca5f..54234cc54789f23c000538b5669841c956992efe 100644
--- a/Jenkinsfile2
+++ b/Jenkinsfile2
@@ -430,7 +430,7 @@ pipeline {
date
rm -rf ${WKC}/debug
cd ${WKC}/tests/parallel_test
- time ./container_build.sh -w ${WKDIR} -t 10 -e
+ time ./container_build.sh -w ${WKDIR} -e
'''
def extra_param = ""
def log_server_file = "/home/log_server.json"
diff --git a/cmake/cmake.version b/cmake/cmake.version
index ba85a3d99b8280a49d9d6e6475cbeb3807090d28..a4c783b6c8cfa4c3c1bea4eb7f4ac40b165efe87 100644
--- a/cmake/cmake.version
+++ b/cmake/cmake.version
@@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
- SET(TD_VER_NUMBER "3.0.2.2")
+ SET(TD_VER_NUMBER "3.0.2.4")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
diff --git a/cmake/taosadapter_CMakeLists.txt.in b/cmake/taosadapter_CMakeLists.txt.in
index 3e2e879e389fc7f6686949efab43bc5fada33f3a..13b247770ea7eef6b64209ca98787ff6d733bf85 100644
--- a/cmake/taosadapter_CMakeLists.txt.in
+++ b/cmake/taosadapter_CMakeLists.txt.in
@@ -2,7 +2,7 @@
# taosadapter
ExternalProject_Add(taosadapter
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
- GIT_TAG a2e9920
+ GIT_TAG 213f8b3
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in
index 115e2fc674c982c755397a8dd745057e14e0ac50..13a81f88eab42c64be7ea0cf759da21ddce7a456 100644
--- a/cmake/taostools_CMakeLists.txt.in
+++ b/cmake/taostools_CMakeLists.txt.in
@@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
- GIT_TAG 4776778
+ GIT_TAG 0cd564a
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
diff --git a/docs/en/07-develop/03-insert-data/30-influxdb-line.mdx b/docs/en/07-develop/03-insert-data/30-influxdb-line.mdx
index 3c088602602301fafa824bc256f1f2caca128abd..fc5644850cbedcb91de2aebca29070dc3c021551 100644
--- a/docs/en/07-develop/03-insert-data/30-influxdb-line.mdx
+++ b/docs/en/07-develop/03-insert-data/30-influxdb-line.mdx
@@ -38,7 +38,7 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0
- Each data in `field_set` must be self-descriptive for its data type. For example 1.2f32 means a value 1.2 of float type. Without the "f" type suffix, it will be treated as type double.
- Multiple kinds of precision can be used for the `timestamp` field. Time precision can be from nanosecond (ns) to hour (h).
- The child table name is created automatically in a rule to guarantee its uniqueness. But you can configure `smlChildTableName` in taos.cfg to specify a tag value as the table names if the tag value is unique globally. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be created automatically. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
-- It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false. Otherwise, data will be written out of order and a database error will occur.(smlDataFormat in taos.cfg default to false after version of 3.0.1.3)
+- It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false. Otherwise, data will be written out of order and a database error will occur. (smlDataFormat in taos.cfg defaults to false since version 3.0.1.3; smlDataFormat has been discarded since 3.0.3.0.)
:::
For more details please refer to [InfluxDB Line Protocol](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/) and [TDengine Schemaless](/reference/schemaless/#Schemaless-Line-Protocol)
diff --git a/docs/en/07-develop/07-tmq.mdx b/docs/en/07-develop/07-tmq.mdx
index 17b3f5caa062eaacb4216b7153e899040e702cc1..92db7d4cbf4bcb35b3f30483e38be4c627a6b821 100644
--- a/docs/en/07-develop/07-tmq.mdx
+++ b/docs/en/07-develop/07-tmq.mdx
@@ -94,22 +94,21 @@ void close() throws SQLException;
```python
-class TaosConsumer():
- def __init__(self, *topics, **configs)
+class Consumer:
+ def subscribe(self, topics):
+ pass
- def __iter__(self)
+ def unsubscribe(self):
+ pass
- def __next__(self)
+ def poll(self, timeout: float = 1.0):
+ pass
- def sync_next(self)
-
- def subscription(self)
+ def close(self):
+ pass
- def unsubscribe(self)
-
- def close(self)
-
- def __del__(self)
+ def commit(self, message):
+ pass
```
@@ -117,19 +116,22 @@ class TaosConsumer():
```go
-func NewConsumer(conf *Config) (*Consumer, error)
-
-func (c *Consumer) Close() error
+func NewConsumer(conf *tmq.ConfigMap) (*Consumer, error)
-func (c *Consumer) Commit(ctx context.Context, message unsafe.Pointer) error
+// rebalanceCb is reserved for compatibility purposes
+func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error
-func (c *Consumer) FreeMessage(message unsafe.Pointer)
+// rebalanceCb is reserved for compatibility purposes
+func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) error
-func (c *Consumer) Poll(timeout time.Duration) (*Result, error)
+func (c *Consumer) Poll(timeoutMs int) tmq.Event
-func (c *Consumer) Subscribe(topics []string) error
+// tmq.TopicPartition is reserved for compatibility purposes
+func (c *Consumer) Commit() ([]tmq.TopicPartition, error)
func (c *Consumer) Unsubscribe() error
+
+func (c *Consumer) Close() error
```
@@ -357,50 +359,20 @@ public class MetersDeserializer extends ReferenceDeserializer {
```go
-config := tmq.NewConfig()
-defer config.Destroy()
-err = config.SetGroupID("test")
-if err != nil {
- panic(err)
-}
-err = config.SetAutoOffsetReset("earliest")
-if err != nil {
- panic(err)
-}
-err = config.SetConnectIP("127.0.0.1")
-if err != nil {
- panic(err)
-}
-err = config.SetConnectUser("root")
-if err != nil {
- panic(err)
-}
-err = config.SetConnectPass("taosdata")
-if err != nil {
- panic(err)
-}
-err = config.SetConnectPort("6030")
-if err != nil {
- panic(err)
-}
-err = config.SetMsgWithTableName(true)
-if err != nil {
- panic(err)
-}
-err = config.EnableHeartBeat()
-if err != nil {
- panic(err)
-}
-err = config.EnableAutoCommit(func(result *wrapper.TMQCommitCallbackResult) {
- if result.ErrCode != 0 {
- errStr := wrapper.TMQErr2Str(result.ErrCode)
- err := errors.NewError(int(result.ErrCode), errStr)
- panic(err)
- }
-})
-if err != nil {
- panic(err)
+conf := &tmq.ConfigMap{
+ "group.id": "test",
+ "auto.offset.reset": "earliest",
+ "td.connect.ip": "127.0.0.1",
+ "td.connect.user": "root",
+ "td.connect.pass": "taosdata",
+ "td.connect.port": "6030",
+ "client.id": "test_tmq_c",
+ "enable.auto.commit": "false",
+ "enable.heartbeat.background": "true",
+ "experimental.snapshot.enable": "true",
+ "msg.with.table.name": "true",
}
+consumer, err := NewConsumer(conf)
```
@@ -422,23 +394,31 @@ let mut consumer = tmq.build()?;
+```python
+from taos.tmq import Consumer
+
+# Syntax: `consumer = Consumer(configs)`
+#
+# Example:
+consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
+```
+
Python programs use the following parameters:
-| Parameter | Type | Description | Remarks |
-| :----------------------------: | :----: | -------------------------------------------------------- | ------------------------------------------- |
-| `td_connect_ip` | string | Used in establishing a connection; same as `taos_connect` | |
-| `td_connect_user` | string | Used in establishing a connection; same as `taos_connect` | |
-| `td_connect_pass` | string | Used in establishing a connection; same as `taos_connect` | |
-| `td_connect_port` | string | Used in establishing a connection; same as `taos_connect` | |
-| `group_id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192. |
-| `client_id` | string | Client ID | Maximum length: 192. |
-| `auto_offset_reset` | string | Initial offset for the consumer group | Specify `earliest`, `latest`, or `none`(default) |
-| `enable_auto_commit` | string | Commit automatically | Specify `true` or `false`. |
-| `auto_commit_interval_ms` | string | Interval for automatic commits, in milliseconds |
-| `enable_heartbeat_background` | string | Backend heartbeat; if enabled, the consumer does not go offline even if it has not polled for a long time | Specify `true` or `false`. |
-| `experimental_snapshot_enable` | string | Specify whether to consume messages from the WAL or from TSBS | Specify `true` or `false`. |
-| `msg_with_table_name` | string | Specify whether to deserialize table names from messages | Specify `true` or `false`.
-| `timeout` | int | Consumer pull timeout | |
+| Parameter | Type | Description | Remarks |
+|:---------:|:----:|:-----------:|:-------:|
+| `td.connect.ip` | string | Used in establishing a connection | |
+| `td.connect.user` | string | Used in establishing a connection | |
+| `td.connect.pass` | string | Used in establishing a connection | |
+| `td.connect.port` | string | Used in establishing a connection | |
+| `group.id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192 |
+| `client.id` | string | Client ID | Maximum length: 192 |
+| `msg.with.table.name` | string | Specify whether to deserialize table names from messages | Specify `true` or `false` |
+| `enable.auto.commit` | string | Commit automatically | Specify `true` or `false` |
+| `auto.commit.interval.ms` | string | Interval for automatic commits, in milliseconds | |
+| `auto.offset.reset` | string | Initial offset for the consumer group | Specify `earliest`, `latest`, or `none`(default) |
+| `experimental.snapshot.enable` | string | Specify whether to consume messages from the WAL or from TSDB | Specify `true` or `false` |
+| `enable.heartbeat.background` | string | Backend heartbeat; if enabled, the consumer does not go offline even if it has not polled for a long time | Specify `true` or `false` |
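+
+A minimal configuration sketch using several of these keys (the connection and group values are illustrative assumptions, not required settings):
+
+```python
+from taos.tmq import Consumer
+
+# Keys correspond to the parameter table above
+consumer = Consumer(
+    {
+        "group.id": "local",
+        "client.id": "1",
+        "td.connect.ip": "127.0.0.1",
+        "enable.auto.commit": "true",
+        "auto.commit.interval.ms": "1000",
+        "auto.offset.reset": "earliest",
+        "msg.with.table.name": "true",
+    }
+)
+```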
@@ -523,11 +503,7 @@ consumer.subscribe(topics);
```go
-consumer, err := tmq.NewConsumer(config)
-if err != nil {
- panic(err)
-}
-err = consumer.Subscribe([]string{"example_tmq_topic"})
+err = consumer.Subscribe("example_tmq_topic", nil)
if err != nil {
panic(err)
}
@@ -545,7 +521,7 @@ consumer.subscribe(["tmq_meters"]).await?;
```python
-consumer = TaosConsumer('topic_ctb_column', group_id='vg2')
+consumer.subscribe(['topic1', 'topic2'])
```
@@ -611,13 +587,17 @@ while(running){
```go
for {
- result, err := consumer.Poll(time.Second)
- if err != nil {
- panic(err)
+ ev := consumer.Poll(0)
+ if ev != nil {
+ switch e := ev.(type) {
+ case *tmqcommon.DataMessage:
+ fmt.Println(e.Value())
+ case tmqcommon.Error:
+ fmt.Fprintf(os.Stderr, "%% Error: %v: %v\n", e.Code(), e)
+ panic(e)
+ }
+ consumer.Commit()
}
- fmt.Println(result)
- consumer.Commit(context.Background(), result.Message)
- consumer.FreeMessage(result.Message)
}
```
@@ -660,9 +640,17 @@ for {
```python
-for msg in consumer:
- for row in msg:
- print(row)
+while True:
+ res = consumer.poll(100)
+ if not res:
+ continue
+ err = res.error()
+ if err is not None:
+ raise err
+ val = res.value()
+
+ for block in val:
+ print(block.fetchall())
```
@@ -729,7 +717,11 @@ consumer.close();
```go
-consumer.Close()
+/* Unsubscribe */
+_ = consumer.Unsubscribe()
+
+/* Close consumer */
+_ = consumer.Close()
```
diff --git a/docs/en/10-deployment/05-helm.md b/docs/en/10-deployment/05-helm.md
index a4fa68100078efe85fff5e1b078ebd07e5337d5a..90baa5f4459c6d9d01530d5e0c36477d79aa5ab9 100644
--- a/docs/en/10-deployment/05-helm.md
+++ b/docs/en/10-deployment/05-helm.md
@@ -22,7 +22,7 @@ Helm uses the kubectl and kubeconfig configurations to perform Kubernetes operat
To use TDengine Chart, download it from GitHub:
```bash
-wget https://github.com/taosdata/TDengine-Operator/raw/3.0/helm/tdengine-3.0.0.tgz
+wget https://github.com/taosdata/TDengine-Operator/raw/3.0/helm/tdengine-3.0.2.tgz
```
@@ -38,7 +38,7 @@ With minikube, the default value is standard.
Use Helm commands to install TDengine:
```bash
-helm install tdengine tdengine-3.0.0.tgz \
+helm install tdengine tdengine-3.0.2.tgz \
--set storage.className=
```
@@ -46,7 +46,7 @@ helm install tdengine tdengine-3.0.0.tgz \
You can configure a small storage size in minikube to ensure that your deployment does not exceed your available disk space.
```bash
-helm install tdengine tdengine-3.0.0.tgz \
+helm install tdengine tdengine-3.0.2.tgz \
--set storage.className=standard \
--set storage.dataSize=2Gi \
--set storage.logSize=10Mi
@@ -83,14 +83,14 @@ You can configure custom parameters in TDengine with the `values.yaml` file.
Run the `helm show values` command to see all parameters supported by TDengine Chart.
```bash
-helm show values tdengine-3.0.0.tgz
+helm show values tdengine-3.0.2.tgz
```
Save the output of this command as `values.yaml`. Then you can modify this file with your desired values and use it to deploy a TDengine cluster:
```bash
-helm install tdengine tdengine-3.0.0.tgz -f values.yaml
+helm install tdengine tdengine-3.0.2.tgz -f values.yaml
```
@@ -107,7 +107,7 @@ image:
prefix: tdengine/tdengine
#pullPolicy: Always
# Overrides the image tag whose default is the chart appVersion.
-# tag: "3.0.0.0"
+# tag: "3.0.2.0"
service:
# ClusterIP is the default service type, use NodeIP only if you know what you are doing.
@@ -155,15 +155,15 @@ clusterDomainSuffix: ""
# See the [Configuration Variables](../../reference/config)
#
# Note:
-# 1. firstEp/secondEp: should not be setted here, it's auto generated at scale-up.
-# 2. serverPort: should not be setted, we'll use the default 6030 in many places.
-# 3. fqdn: will be auto generated in kubenetes, user should not care about it.
+# 1. firstEp/secondEp: should not be set here, it's auto generated at scale-up.
+# 2. serverPort: should not be set, we'll use the default 6030 in many places.
+# 3. fqdn: will be auto generated in kubernetes, user should not care about it.
# 4. role: currently role is not supported - every node is able to be mnode and vnode.
#
# Btw, keep quotes "" around the value like below, even the value will be number or not.
taoscfg:
# Starts as cluster or not, must be 0 or 1.
- # 0: all pods will start as a seperate TDengine server
+ # 0: all pods will start as a separate TDengine server
# 1: pods will start as TDengine server cluster. [default]
CLUSTER: "1"
diff --git a/docs/en/12-taos-sql/02-database.md b/docs/en/12-taos-sql/02-database.md
index c3c7e5928bb6705939dfae7d4e0096b202025520..059f124ea5b5f380cd72ba55a20b6cf3a80b035e 100644
--- a/docs/en/12-taos-sql/02-database.md
+++ b/docs/en/12-taos-sql/02-database.md
@@ -30,8 +30,10 @@ database_option: {
| WAL_LEVEL {1 | 2}
| VGROUPS value
| SINGLE_STABLE {0 | 1}
+ | STT_TRIGGER value
| TABLE_PREFIX value
| TABLE_SUFFIX value
+ | TSDB_PAGESIZE value
| WAL_RETENTION_PERIOD value
| WAL_ROLL_PERIOD value
| WAL_RETENTION_SIZE value
@@ -56,7 +58,7 @@ database_option: {
- WAL_FSYNC_PERIOD: specifies the interval (in milliseconds) at which data is written from the WAL to disk. This parameter takes effect only when the WAL parameter is set to 2. The default value is 3000. Enter a value between 0 and 180000. The value 0 indicates that incoming data is immediately written to disk.
- MAXROWS: specifies the maximum number of rows recorded in a block. The default value is 4096.
- MINROWS: specifies the minimum number of rows recorded in a block. The default value is 100.
-- KEEP: specifies the time for which data is retained. Enter a value between 1 and 365000. The default value is 3650. The value of the KEEP parameter must be greater than or equal to the value of the DURATION parameter. TDengine automatically deletes data that is older than the value of the KEEP parameter. You can use m (minutes), h (hours), and d (days) as the unit, for example KEEP 100h or KEEP 10d. If you do not include a unit, d is used by default.
+- KEEP: specifies the time for which data is retained. Enter a value between 1 and 365000. The default value is 3650. The value of the KEEP parameter must be greater than or equal to the value of the DURATION parameter. TDengine automatically deletes data that is older than the value of the KEEP parameter. You can use m (minutes), h (hours), and d (days) as the unit, for example KEEP 100h or KEEP 10d. If you do not include a unit, d is used by default. The Enterprise Edition supports [Tiered Storage](https://docs.tdengine.com/tdinternal/arch/#tiered-storage), so multiple KEEP values can be specified (comma separated, up to 3 values, satisfying keep0 <= keep1 <= keep2, e.g. KEEP 100h,100d,3650d); the Community Edition does not support Tiered Storage (if multiple KEEP values are configured, they do not take effect; only the maximum value is used as KEEP).
- PAGES: specifies the number of pages in the metadata storage engine cache on each vnode. Enter a value greater than or equal to 64. The default value is 256. The space occupied by metadata storage on each vnode is equal to the product of the values of the PAGESIZE and PAGES parameters. The space occupied by default is 1 MB.
- PAGESIZE: specifies the size (in KB) of each page in the metadata storage engine cache on each vnode. The default value is 4. Enter a value between 1 and 16384.
- PRECISION: specifies the precision at which a database records timestamps. Enter ms for milliseconds, us for microseconds, or ns for nanoseconds. The default value is ms.
@@ -69,8 +71,10 @@ database_option: {
- SINGLE_STABLE: specifies whether the database can contain more than one supertable.
- 0: The database can contain multiple supertables.
- 1: The database can contain only one supertable.
+- STT_TRIGGER: specifies the number of flushed files that triggers a file merge. The default is 8, ranging from 1 to 16. For high-frequency scenarios with few tables, it is recommended to use the default value or a smaller one; for multi-table low-frequency scenarios, a larger value is recommended.
- TABLE_PREFIX: The prefix length in the table name that is ignored when distributing a table to a vnode based on the table name.
- TABLE_SUFFIX: The suffix length in the table name that is ignored when distributing a table to a vnode based on the table name.
+- TSDB_PAGESIZE: The page size of the data storage engine in a vnode. The unit is KB. The default is 4 KB. The range is 1 to 16384, that is, 1 KB to 16 MB.
- WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. This parameter is used for data subscription. Enter a time in seconds. The default value of single copy is 0. A value of 0 indicates that each WAL file is deleted immediately after its contents are written to disk. -1: WAL files are never deleted. The default value of multiple copy is 4 days.
- WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription. Enter a size in KB. The default value of single copy is 0. A value of 0 indicates that each WAL file is deleted immediately after its contents are written to disk. -1: WAL files are never deleted. The default value of multiple copy is -1.
- WAL_ROLL_PERIOD: specifies the time after which WAL files are rotated. After this period elapses, a new WAL file is created. The default value of single copy is 0. A value of 0 indicates that a new WAL file is created only after the previous WAL file was written to disk. The default values of multiple copy is 1 day.
@@ -112,6 +116,10 @@ alter_database_options:
alter_database_option: {
CACHEMODEL {'none' | 'last_row' | 'last_value' | 'both'}
| CACHESIZE value
+ | BUFFER value
+ | PAGES value
+ | REPLICA value
+ | STT_TRIGGER value
| WAL_LEVEL value
| WAL_FSYNC_PERIOD value
| KEEP value
@@ -154,3 +162,19 @@ TRIM DATABASE db_name;
```
The preceding SQL statement deletes data that has expired and orders the remaining data in accordance with the storage configuration.
+
+## Redistribute Vgroup
+
+```sql
+REDISTRIBUTE VGROUP vgroup_no DNODE dnode_id1 [DNODE dnode_id2] [DNODE dnode_id3]
+```
+
+Adjusts the distribution of vnodes in the vgroup according to the given list of dnodes.
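+
+For example, a hedged sketch (assuming vgroup 5 and dnodes 1, 2, and 3 exist in the cluster):
+
+```sql
+REDISTRIBUTE VGROUP 5 DNODE 1 DNODE 2 DNODE 3;
+```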
+
+## Balance Vgroup
+
+```sql
+BALANCE VGROUP
+```
+
+Automatically adjusts the distribution of vnodes in all vgroups of the cluster, which is equivalent to load balancing the data of the cluster at the vnode level.
diff --git a/docs/en/12-taos-sql/06-select.md b/docs/en/12-taos-sql/06-select.md
index c087a9e9fb2f0af921aa031d41d124c66fbb0ae7..ee06a7be2d3172210bf35302d5bffbf7a49adabc 100644
--- a/docs/en/12-taos-sql/06-select.md
+++ b/docs/en/12-taos-sql/06-select.md
@@ -350,9 +350,9 @@ SELECT AVG(CASE WHEN voltage < 200 or voltage > 250 THEN 220 ELSE voltage END) F
## JOIN
-TDengine supports natural joins between supertables, between standard tables, and between subqueries. The difference between natural joins and inner joins is that natural joins require that the fields being joined in the supertables or standard tables must have the same name. Data or tag columns must be joined with the equivalent column in another table.
+TDengine supports `INNER JOIN` based on the timestamp primary key; that is, the `JOIN` condition must contain the timestamp primary key. As long as this requirement is met, `INNER JOIN` can be performed between normal tables, subtables, supertables, and subqueries at will, and there is no limit on the number of tables.
-For standard tables, only the timestamp (primary key) can be used in join operations. For example:
+For standard tables:
```sql
SELECT *
@@ -360,7 +360,7 @@ FROM temp_tb_1 t1, pressure_tb_1 t2
WHERE t1.ts = t2.ts
```
-For supertables, tags as well as timestamps can be used in join operations. For example:
+For supertables:
```sql
SELECT *
@@ -368,20 +368,15 @@ FROM temp_stable t1, temp_stable t2
WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0;
```
-Similarly, join operations can be performed on the result sets of multiple subqueries.
-
-:::note
-
-The following restriction apply to JOIN statements:
+For a subtable and a supertable:
-- The number of tables or supertables in a single join operation cannot exceed 10.
-- `FILL` cannot be used in a JOIN statement.
-- Arithmetic operations cannot be performed on the result sets of join operation.
-- `GROUP BY` is not allowed on a segment of the tables that participate in a join operation.
-- `OR` cannot be used in the conditions for join operation
-- Join operation can be performed only on tags or timestamps. You cannot perform a join operation on data columns.
+```sql
+SELECT *
+FROM temp_ctable t1, temp_stable t2
+WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0;
+```
-:::
+Similarly, join operations can be performed on the result sets of multiple subqueries.
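+
+For example, a minimal sketch of joining two subquery result sets (the subquery tables and columns are illustrative assumptions):
+
+```sql
+SELECT *
+FROM (SELECT ts, temperature FROM temp_tb_1) t1, (SELECT ts, pressure FROM pressure_tb_1) t2
+WHERE t1.ts = t2.ts;
+```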
## Nested Query
diff --git a/docs/en/12-taos-sql/10-function.md b/docs/en/12-taos-sql/10-function.md
index 30422ca20cf44af4e1808eae2912e1591502d4c8..802eb259bf72687f9c75cdb34e3520040d3c9010 100644
--- a/docs/en/12-taos-sql/10-function.md
+++ b/docs/en/12-taos-sql/10-function.md
@@ -877,7 +877,7 @@ INTERP(expr)
- Interpolation is performed based on `FILL` parameter.
- `INTERP` can only be used to interpolate in single timeline. So it must be used with `partition by tbname` when it's used on a STable.
- Pseudocolumn `_irowts` can be used along with `INTERP` to return the timestamps associated with interpolation points(support after version 3.0.1.4).
-- Pseudocolumn `_isfilled` can be used along with `INTERP` to indicate whether the results are original records or data points generated by interpolation algorithm(support after version 3.0.2.1).
+- Pseudocolumn `_isfilled` can be used along with `INTERP` to indicate whether the results are original records or data points generated by interpolation algorithm(support after version 3.0.2.3).
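+
+For example, a hedged sketch returning both pseudocolumns alongside the interpolated values (the table and column names are illustrative):
+
+```sql
+SELECT _irowts, _isfilled, INTERP(current) FROM meters
+PARTITION BY tbname
+RANGE('2023-01-01 00:00:00', '2023-01-01 00:01:00') EVERY(1s) FILL(LINEAR);
+```
+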
### LAST
diff --git a/docs/en/12-taos-sql/24-show.md b/docs/en/12-taos-sql/24-show.md
index 2db3e7cb31463e20f024f48e62d06422519ba0e7..f70d86570e5fe9cf4f9eb6e58dd1908c62adcc89 100644
--- a/docs/en/12-taos-sql/24-show.md
+++ b/docs/en/12-taos-sql/24-show.md
@@ -363,7 +363,7 @@ Shows information about all vgroups in the system or about the vgroups for a spe
## SHOW VNODES
```sql
-SHOW VNODES [dnode_name];
+SHOW VNODES {dnode_id | dnode_endpoint};
```
Shows information about all vnodes in the system or about the vnodes for a specified dnode.
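+
+For example, a hedged sketch (the dnode ID, endpoint value, and quoting style are illustrative assumptions):
+
+```sql
+SHOW VNODES 1;
+SHOW VNODES 'localhost:6030';
+```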
diff --git a/docs/en/12-taos-sql/29-changes.md b/docs/en/12-taos-sql/29-changes.md
index 78b6d5fc05b9b03e1e8b3af268bc357dfaa401bc..f288cd7545c1de25f6cf8b003e084ba3622524a7 100644
--- a/docs/en/12-taos-sql/29-changes.md
+++ b/docs/en/12-taos-sql/29-changes.md
@@ -54,7 +54,6 @@ The following data types can be used in the schema for standard tables.
| 27 | GRANT | Added | Grants permissions to a user.
| 28 | KILL TRANSACTION | Added | Terminates an mnode transaction.
| 29 | KILL STREAM | Deprecated | Terminated a continuous query. The continuous query feature has been replaced with the stream processing feature.
-| 30 | MERGE VGROUP | Added | Merges vgroups.
| 31 | REVOKE | Added | Revokes permissions from a user.
| 32 | SELECT | Modified |
SELECT does not use the implicit results column. Output columns must be specified in the SELECT clause.
DISTINCT support is enhanced. In previous versions, DISTINCT only worked on the tag column and could not be used with JOIN or GROUP BY.
JOIN support is enhanced. The following are now supported after JOIN: a WHERE clause with OR, operations on multiple tables, and GROUP BY on multiple tables.
Subqueries after FROM are enhanced. Levels of nesting are no longer restricted. Subqueries can be used with UNION ALL. Other syntax restrictions are eliminated.
All scalar functions can be used after WHERE.
GROUP BY is enhanced. You can group by any scalar expression or combination thereof.
SESSION can be used on supertables. When PARTITION BY is not used, data in supertables is merged into a single timeline.
STATE_WINDOW can be used on supertables. When PARTITION BY is not used, data in supertables is merged into a single timeline.
ORDER BY is enhanced. It is no longer required to use ORDER BY and GROUP BY together. There is no longer a restriction on the number of order expressions. NULLS FIRST and NULLS LAST syntax has been added. Any expression that conforms to the ORDER BY semantics can be used.
Added PARTITION BY syntax. PARTITION BY replaces GROUP BY tags.
| 33 | SHOW ACCOUNTS | Deprecated | This Enterprise Edition-only statement has been removed. It returns the error "This statement is no longer supported."
@@ -76,8 +75,9 @@ The following data types can be used in the schema for standard tables.
| 49 | SHOW TRANSACTIONS | Added | Shows all running transactions in the system.
| 50 | SHOW DNODE VARIABLES | Added | Shows the configuration of the specified dnode.
| 51 | SHOW VNODES | Not supported | Shows information about vnodes in the system. Not supported.
-| 52 | SPLIT VGROUP | Added | Splits a vgroup into two vgroups.
-| 53 | TRIM DATABASE | Added | Deletes data that has expired and orders the remaining data in accordance with the storage configuration.
+| 52 | TRIM DATABASE | Added | Deletes data that has expired and orders the remaining data in accordance with the storage configuration.
+| 53 | REDISTRIBUTE VGROUP | Added | Adjusts the distribution of vnodes in a vgroup.
+| 54 | BALANCE VGROUP | Added | Automatically adjusts the distribution of vnodes in all vgroups.
## SQL Functions
diff --git a/docs/en/14-reference/03-connector/05-go.mdx b/docs/en/14-reference/03-connector/05-go.mdx
index df5b129cea552144d5833190d46e8a78f2fd2fa5..60407c0735bf9bcb42ae54bddcc9afa639a02fcc 100644
--- a/docs/en/14-reference/03-connector/05-go.mdx
+++ b/docs/en/14-reference/03-connector/05-go.mdx
@@ -355,26 +355,29 @@ The `af` package encapsulates TDengine advanced functions such as connection man
#### Subscribe
-* `func NewConsumer(conf *Config) (*Consumer, error)`
+* `func NewConsumer(conf *tmq.ConfigMap) (*Consumer, error)`
Creates consumer group.
-* `func (c *Consumer) Subscribe(topics []string) error`
+* `func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error`
+Note: `rebalanceCb` is reserved for compatibility purposes
+
+Subscribes to a topic.
+
+* `func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) error`
+Note: `rebalanceCb` is reserved for compatibility purposes
Subscribes to topics.
-* `func (c *Consumer) Poll(timeout time.Duration) (*Result, error)`
+* `func (c *Consumer) Poll(timeoutMs int) tmq.Event`
Polling information.
-* `func (c *Consumer) Commit(ctx context.Context, message unsafe.Pointer) error`
+* `func (c *Consumer) Commit() ([]tmq.TopicPartition, error)`
+Note: `tmq.TopicPartition` is reserved for compatibility purposes
Commit information.
-* `func (c *Consumer) FreeMessage(message unsafe.Pointer)`
-
-Free information.
-
* `func (c *Consumer) Unsubscribe() error`
Unsubscribe.
@@ -441,25 +444,36 @@ Close consumer.
### Subscribe via WebSocket
-* `func NewConsumer(config *Config) (*Consumer, error)`
+* `func NewConsumer(conf *tmq.ConfigMap) (*Consumer, error)`
- Creates consumer group.
+Creates consumer group.
+
+* `func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error`
+Note: `rebalanceCb` is reserved for compatibility purposes
-* `func (c *Consumer) Subscribe(topic []string) error`
+Subscribes to a topic.
- Subscribes to topics.
+* `func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) error`
+Note: `rebalanceCb` is reserved for compatibility purposes
-* `func (c *Consumer) Poll(timeout time.Duration) (*Result, error)`
+Subscribes to topics.
- Polling information.
+* `func (c *Consumer) Poll(timeoutMs int) tmq.Event`
-* `func (c *Consumer) Commit(messageID uint64) error`
+Polling information.
- Commit information.
+* `func (c *Consumer) Commit() ([]tmq.TopicPartition, error)`
+Note: `tmq.TopicPartition` is reserved for compatibility purposes
+
+Commit information.
+
+* `func (c *Consumer) Unsubscribe() error`
+
+Unsubscribe.
* `func (c *Consumer) Close() error`
- Close consumer.
+Close consumer.
For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/3.0/examples/tmqoverws/main.go)
diff --git a/docs/en/14-reference/03-connector/07-python.mdx b/docs/en/14-reference/03-connector/07-python.mdx
index 25e6b2188a64928e35b8e8c45988a426802eb9f3..d593c3f133dafa3b5f8083577f8f0d4d75cb3d8b 100644
--- a/docs/en/14-reference/03-connector/07-python.mdx
+++ b/docs/en/14-reference/03-connector/07-python.mdx
@@ -32,7 +32,7 @@ We recommend using the latest version of `taospy`, regardless of the version of
### Preparation
-1. Install Python. Python >= 3.7 is recommended. If Python is not available on your system, refer to the [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download) to install it.
+1. Install Python. Recent taospy packages require Python 3.6+, while earlier versions of taospy require Python 3.7+. The taos-ws-py package requires Python 3.7+. If Python is not available on your system, refer to the [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download) to install it.
2. Install [pip](https://pypi.org/project/pip/). In most cases, the Python installer comes with the pip utility. If not, please refer to [pip documentation](https://pip.pypa.io/en/stable/installation/) to install it.
If you use a native connection, you will also need to [Install Client Driver](/reference/connector#Install-Client-Driver). The client install package includes the TDengine client dynamic link library (`libtaos.so` or `taos.dll`) and the TDengine CLI.
@@ -78,6 +78,22 @@ pip3 install git+https://github.com/taosdata/taos-connector-python.git
+#### Install `taos-ws-py` (Optional)
+
+The taos-ws-py package provides a way to access TDengine via WebSocket.
+
+##### Install taos-ws-py with taospy
+
+```bash
+pip3 install taospy[ws]
+```
+
+##### Install taos-ws-py only
+
+```bash
+pip3 install taos-ws-py
+```
+
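+A minimal connection sketch after installation, assuming taosAdapter is running locally with the default WebSocket port 6041 and default credentials (the DSN values are illustrative):
+
+```python
+import taosws
+
+# DSN form: taosws://<user>:<password>@<host>:<port>
+conn = taosws.connect("taosws://root:taosdata@localhost:6041")
+```
+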
### Verify
diff --git a/docs/en/14-reference/03-connector/09-csharp.mdx b/docs/en/14-reference/03-connector/09-csharp.mdx
index 85514f58ac1a19c7ae1a725e9b055f10280ebbb6..756e948bd293477c37439b624bab9af86191e232 100644
--- a/docs/en/14-reference/03-connector/09-csharp.mdx
+++ b/docs/en/14-reference/03-connector/09-csharp.mdx
@@ -17,7 +17,7 @@ import CSAsyncQuery from "../../07-develop/04-query-data/_cs_async.mdx"
`TDengine.Connector` is a C# language connector provided by TDengine that allows C# developers to develop C# applications that access TDengine cluster data.
-The `TDengine.Connector` connector supports connect to TDengine instances via the TDengine client driver (taosc), providing data writing, querying, subscription, schemaless writing, bind interface, etc.The `TDengine.Connector` also supports WebSocket and developers can build connection through DSN, which supports data writing, querying, and parameter binding, etc.
+The `TDengine.Connector` connector supports connecting to TDengine instances via the TDengine client driver (taosc), providing data writing, querying, subscription, schemaless writing, the bind interface, etc. `TDengine.Connector` also supports WebSocket from v3.0.1, and developers can build connections through a DSN, which supports data writing, querying, parameter binding, etc.
This article describes how to install `TDengine.Connector` in a Linux or Windows environment and connect to TDengine clusters via `TDengine.Connector` to perform basic operations such as data writing and querying.
@@ -66,31 +66,43 @@ Please refer to [version support list](/reference/connector#version-support)
* [Nuget Client](https://docs.microsoft.com/en-us/nuget/install-nuget-client-tools) (optional installation)
* Install TDengine client driver, please refer to [Install client driver](/reference/connector/#install-client-driver) for details
-### Install via dotnet CLI
+### Install `TDengine.Connector`
-
+
-You can reference the `TDengine.Connector` published in Nuget to the current project via the `dotnet` command under the path of the existing .NET project.
+You can add a reference to the `TDengine.Connector` package published on NuGet to the current project via the `dotnet` CLI, run from the path of the existing .NET project.
``` bash
dotnet add package TDengine.Connector
```
-
-
-
-You can [download the source code](https://github.com/taosdata/taos-connector-dotnet/tree/3.0) and directly reference the latest version of the TDengine.Connector library.
+You may also modify the current .NET project file and include the following `ItemGroup` in your project file (.csproj), adjusting the version as needed.
-```bash
-git clone -b 3.0 https://github.com/taosdata/taos-connector-dotnet.git
-cd taos-connector-dotnet
-cp -r src/ myProject
+``` XML
+<ItemGroup>
+  <!-- Version shown is illustrative; use the release you need -->
+  <PackageReference Include="TDengine.Connector" Version="3.0.2" />
+</ItemGroup>
+```
-cd myProject
-dotnet add exmaple.csproj reference src/TDengine.csproj
+
+
+
+In this scenario, modifying your project file is required in order to copy the WebSocket dependency dynamic library from the NuGet package into your project.
+```XML
+
+
+
+
+
+
+
+
+
```
+Notice: only `TDengine.Connector` version >= 3.0.2 includes the dynamic library for WebSocket.
+
@@ -252,19 +264,20 @@ ws://localhost:6041/test
|Sample program |Sample program description |
|--------------------------------------------------------------------------------------------------------------------|--------------------------------------------|
-| [CURD](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/Query/Query.cs) | Table creation, data insertion, and query examples with TDengine.Connector |
-| [JSON Tag](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/JSONTag) | Writing and querying JSON tag data with TDengine Connector |
-| [stmt](https://github.com/taosdata/taos-connector-dotnet/tree/3.0/examples/Stmt) | Parameter binding with TDengine Connector |
-| [schemaless](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/schemaless) | Schemaless writes with TDengine Connector |
-| [async query](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/AsyncQuery/QueryAsync.cs) | Asynchronous queries with TDengine Connector |
-| [Subscription](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/TMQ/TMQ.cs) | Subscription example with TDengine Connector |
-| [Basic WebSocket Usage](https://github.com/taosdata/taos-connector-dotnet/blob/5a4a7cd0dbcda114447cdc6d0c6dedd8e84a52da/examples/WS/WebSocketSample.cs) | WebSocket basic data in and out with TDengine connector |
-| [WebSocket Parameter Binding](https://github.com/taosdata/taos-connector-dotnet/blob/5a4a7cd0dbcda114447cdc6d0c6dedd8e84a52da/examples/WS/WebSocketSTMT.cs) | WebSocket parameter binding example |
+| [CURD](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/Query/Query.cs) | Table creation, data insertion, and query examples with TDengine.Connector |
+| [JSON Tag](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/JSONTag) | Writing and querying JSON tag data with TDengine Connector |
+| [stmt](https://github.com/taosdata/taos-connector-dotnet/tree/3.0/examples/NET6Examples/Stmt) | Parameter binding with TDengine Connector |
+| [schemaless](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/schemaless) | Schemaless writes with TDengine Connector |
+| [async query](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/AsyncQuery/QueryAsync.cs) | Asynchronous queries with TDengine Connector |
+| [Subscription](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/TMQ/TMQ.cs) | Subscription example with TDengine Connector |
+| [Basic WebSocket Usage](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/FrameWork45/WS/WebSocketSample.cs) | WebSocket basic data in and out with TDengine connector |
+| [WebSocket Parameter Binding](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/FrameWork45/WS/WebSocketSTMT.cs) | WebSocket parameter binding example |
## Important update records
| TDengine.Connector | Description |
|--------------------|--------------------------------|
+| 3.0.2 | Supports .NET Framework 4.5 and above and .NET Standard 2.0. The NuGet package includes the dynamic library for WebSocket. |
| 3.0.1 | Support WebSocket and Cloud, with query, insert, and parameter binding functions |
| 3.0.0 | Supports TDengine 3.0.0.0. TDengine 2.x is not supported. Added `TDengine.Impl.GetData()` interface to deserialize query results. |
| 1.0.7 | Fixed TDengine.Query() memory leak. |
diff --git a/docs/en/14-reference/03-connector/index.mdx b/docs/en/14-reference/03-connector/index.mdx
index ba8dbb85d4982c5d6c89f5dbe6157bd88a8c00a4..da3aae8309a9282a37597a3ac4bb623e6dfd1b79 100644
--- a/docs/en/14-reference/03-connector/index.mdx
+++ b/docs/en/14-reference/03-connector/index.mdx
@@ -59,11 +59,11 @@ The different database framework specifications for various programming language
| -------------------------------------- | ------------- | --------------- | ------------- | ------------- | ------------- | ------------- |
| **Connection Management** | Support | Support | Support | Support | Support | Support |
| **Regular Query** | Support | Support | Support | Support | Support | Support |
-| **Parameter Binding** | Not supported | Not supported | support | Support | Not supported | Support |
-| **Subscription (TMQ) ** | Not supported | Not supported | support | Not supported | Not supported | Support |
-| **Schemaless** | Not supported | Not supported | Not supported | Not supported | Not supported | Not supported |
-| **Bulk Pulling (based on WebSocket) ** | Support | Support | Support | support | Support | Support |
-| **DataFrame** | Not supported | Support | Not supported | Not supported | Not supported | Not supported |
+| **Parameter Binding** | Not Supported | Not Supported | Support | Support | Not Supported | Support |
+| **Subscription (TMQ) ** | Not Supported | Support | Support | Not Supported | Not Supported | Support |
+| **Schemaless** | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported |
+| **Bulk Pulling (based on WebSocket) ** | Support | Support | Support | Support | Support | Support |
+| **DataFrame** | Not Supported | Support | Not Supported | Not Supported | Not Supported | Not Supported |
:::warning
diff --git a/docs/en/14-reference/05-taosbenchmark.md b/docs/en/14-reference/05-taosbenchmark.md
index 19feeb674060cbe0e7ec13ed4e47bb3fd85836cc..4017b12be923a9bcb5696c8b4b57c2d67b5c1378 100644
--- a/docs/en/14-reference/05-taosbenchmark.md
+++ b/docs/en/14-reference/05-taosbenchmark.md
@@ -92,7 +92,7 @@ taosBenchmark -f
-## Command-line argument in detailed
+## Command-line arguments in detail
- **-f/--file ** :
Specify the configuration file to use. This file includes all parameters. Users should not use this parameter with other parameters on the command line. There is no default value.
@@ -198,7 +198,7 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
- **-R/--disorder-range ** :
Specify the timestamp range for the disordered data. The resulting disordered timestamp is the ordered timestamp minus a random value in this range. Valid only if the percentage of disordered data specified by `-O/--disorder` is greater than 0.
-- **-F/--prepare_rand ** :
+- **-F/--prepared_rand ** :
Specify the number of unique values in the generated random data. A value of 1 means that all data are equal. The default value is 10000.
- **-a/--replica ** :
@@ -216,7 +216,7 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
- **-? /--help** :
Show help information and exit. Users should not use it with other parameters.
-## Configuration file parameters in detailed
+## Configuration file parameters in detail
### General configuration parameters
@@ -380,7 +380,7 @@ The configuration parameters for specifying super table tag columns and data col
- **num_of_records_per_req** :
Writing the number of rows of records per request to TDengine, the default value is 30000. When it is set too large, the TDengine client driver will return the corresponding error message, so you need to lower the setting of this parameter to meet the writing requirements.
-- **prepare_rand**: The number of unique values in the generated random data. A value of 1 means that all data are equal. The default value is 10000.
+- **prepared_rand**: The number of unique values in the generated random data. A value of 1 means that all data are equal. The default value is 10000.
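+
+A minimal sketch of the corresponding field in an insertion configuration file (all other required fields omitted):
+
+```json
+{
+  "prepared_rand": 10000
+}
+```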
### Query scenario configuration parameters
diff --git a/docs/en/14-reference/12-config/index.md b/docs/en/14-reference/12-config/index.md
index b6bfa4bc7d57a9139992a0f1aab528b267e5bd03..9e56a0b0bff931c3b10103c5d63f9134baf280a1 100644
--- a/docs/en/14-reference/12-config/index.md
+++ b/docs/en/14-reference/12-config/index.md
@@ -142,6 +142,15 @@ The parameters described in this document by the effect that they have on the sy
| Meaning | Switch for allowing TDengine to collect and report service usage information |
| Value Range | 0: Not allowed; 1: Allowed |
| Default Value | 1 |
+
+### crashReporting
+
+| Attribute | Description |
+| -------- | -------------------------------------------- |
+| Applicable | Server Only |
+| Meaning | Switch for allowing TDengine to collect and report crash-related information |
+| Value Range | 0: Not allowed; 1: Allowed |
+| Default Value | 1 |
+
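+For example, a minimal sketch of disabling crash reporting in `taos.cfg` (using the standard `parameter value` line format):
+
+```text
+crashReporting 0
+```
+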
## Query Parameters
@@ -314,6 +323,7 @@ The charset that takes effect is UTF-8.
| Applicable | Server Only |
| Meaning | All data files are stored in this directory |
| Default Value | /var/lib/taos |
+| Note | The [Tiered Storage](https://docs.tdengine.com/tdinternal/arch/#tiered-storage) function needs to be used in conjunction with the [KEEP](https://docs.tdengine.com/taos-sql/database/#parameters) parameter |
### tempDir
@@ -594,7 +604,7 @@ The charset that takes effect is UTF-8.
| Attribute | Description |
| -------- | ----------------------------- |
| Applicable | Client only |
-| Meaning | Whether schemaless columns are consistently ordered |
+| Meaning | Whether schemaless columns are consistently ordered; deprecated and discarded since 3.0.3.0 |
| Value Range | 0: not consistent; 1: consistent. |
| Default | 1 |
@@ -656,7 +666,7 @@ The charset that takes effect is UTF-8.
| 20 | minimalTmpDirGB | Yes | Yes | |
| 21 | smlChildTableName | Yes | Yes | |
| 22 | smlTagName | Yes | Yes | |
-| 23 | smlDataFormat | No | Yes | |
+| 23 | smlDataFormat | No | Yes (discarded since 3.0.3.0) | |
| 24 | statusInterval | Yes | Yes | |
| 25 | logDir | Yes | Yes | |
| 26 | minimalLogDirGB | Yes | Yes | |
diff --git a/docs/en/14-reference/13-schemaless/13-schemaless.md b/docs/en/14-reference/13-schemaless/13-schemaless.md
index 10321ab083e6e654e66cb73f1bc21f9fbd678fda..a97a54af02601ddc2dde43614ac7ba8a6c8d4009 100644
--- a/docs/en/14-reference/13-schemaless/13-schemaless.md
+++ b/docs/en/14-reference/13-schemaless/13-schemaless.md
@@ -80,7 +80,7 @@ You can configure smlChildTableName in taos.cfg to specify table names, for exam
NULL.
6. For BINARY or NCHAR columns, if the length of the value provided in a data row exceeds the column type limit, the maximum length of characters allowed to be stored in the column is automatically increased (only incremented and not decremented) to ensure complete preservation of the data.
7. Errors encountered throughout the processing will interrupt the writing process and return an error code.
-8. It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false. Otherwise, data will be written out of order and a database error will occur.(smlDataFormat in taos.cfg default to false after version of 3.0.1.3)
+8. It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false. Otherwise, data will be written out of order and a database error will occur. (smlDataFormat in taos.cfg defaults to false since version 3.0.1.3; discarded since 3.0.3.0.)
:::tip
All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed
diff --git a/docs/en/27-train-faq/01-faq.md b/docs/en/27-train-faq/01-faq.md
index 82e98b0d980c16acad0783abd62525cc6bde06ec..7650e9736556d3b9887e0c2fc8f9e94091c58c2d 100644
--- a/docs/en/27-train-faq/01-faq.md
+++ b/docs/en/27-train-faq/01-faq.md
@@ -33,7 +33,7 @@ TDengine 3.0 is not compatible with the configuration and data files from previo
4. Install TDengine 3.0.
5. For assistance in migrating data to TDengine 3.0, contact [TDengine Support](https://tdengine.com/support).
-### 4. How can I resolve the "Unable to establish connection" error?
+### 2. How can I resolve the "Unable to establish connection" error?
This error indicates that the client could not connect to the server. Perform the following troubleshooting steps:
@@ -68,7 +68,7 @@ This error indicates that the client could not connect to the server. Perform th
11. You can also use the TDengine CLI to diagnose network issues. For more information, see [Problem Diagnostics](https://docs.tdengine.com/operation/diagnose/).
-### 5. How can I resolve the "Unable to resolve FQDN" error?
+### 3. How can I resolve the "Unable to resolve FQDN" error?
Clients and dnodes must be able to resolve the FQDN of each required node. You can confirm your configuration as follows:
@@ -79,15 +79,15 @@ Clients and dnodes must be able to resolve the FQDN of each required node. You c
5. If TDengine has been previously installed and the `hostname` was modified, open `dnode.json` in the `data` folder and verify that the endpoint configuration is correct. The default location of the dnode file is `/var/lib/taos/dnode`. Ensure that you clean up previous installations before reinstalling TDengine.
6. Confirm whether FQDNs are preconfigured in `/etc/hosts` and `/etc/hostname`.
-### 6. What is the most effective way to write data to TDengine?
+### 4. What is the most effective way to write data to TDengine?
Writing data in batches provides higher efficiency in most situations. You can insert one or more data records into one or more tables in a single SQL statement.
-### 9. Why are table names not fully displayed?
+### 5. Why are table names not fully displayed?
The number of columns in the TDengine CLI terminal display is limited. This can cause table names to be cut off, and if you use an incomplete name in a statement, the "Table does not exist" error will occur. You can increase the display size with the `maxBinaryDisplayWidth` parameter or the SQL statement `set max_binary_display_width`. You can also append `\G` to your SQL statement to bypass this limitation.
-### 10. How can I migrate data?
+### 6. How can I migrate data?
In TDengine, the `hostname` uniquely identifies a machine. When you move data files to a new machine, you must configure the new machine to have the same `host name` as the original machine.
@@ -97,7 +97,7 @@ The data structure of previous versions of TDengine is not compatible with versi
:::
-### 11. How can I temporary change the log level from the TDengine Client?
+### 7. How can I temporarily change the log level from the TDengine Client?
To change the log level for debugging purposes, you can use the following command:
@@ -118,14 +118,14 @@ Use `resetlog` to remove all logs generated on the local client. Use the other p
For each parameter, you can set the value to `131` (error and warning), `135` (error, warning, and debug), or `143` (error, warning, debug, and trace).
-### Why do TDengine components written in Go fail to compile?
+### 8. Why do TDengine components written in Go fail to compile?
TDengine includes taosAdapter, an independent component written in Go. This component provides the REST API as well as data access for other products such as Prometheus and Telegraf.
When using the develop branch, you must run `git submodule update --init --recursive` to download the taosAdapter repository and then compile it.
TDengine Go components require Go version 1.14 or later.
-### 13. How can I query the storage space being used by my data?
+### 9. How can I query the storage space being used by my data?
The TDengine data files are stored in `/var/lib/taos` by default. Log files are stored in `/var/log/taos`.
@@ -133,7 +133,7 @@ To see how much space your data files occupy, run `du -sh /var/lib/taos/vnode --
If you want to see how much space is occupied by a single database, first determine which vgroup is storing the database by running `show vgroups`. Then check `/var/lib/taos/vnode` for the files associated with the vgroup ID.
-### 15. How is timezone information processed for timestamps?
+### 10. How is timezone information processed for timestamps?
TDengine uses the timezone of the client for timestamps. The server timezone does not affect timestamps. The client converts Unix timestamps in SQL statements to UTC before sending them to the server. When you query data on the server, it provides timestamps in UTC to the client, which converts them to its local time.
@@ -144,13 +144,13 @@ Timestamps are processed as follows:
3. A timezone explicitly specified when establishing a connection to TDengine through a connector takes precedence over `taos.cfg` and the system timezone. For example, the Java connector allows you to specify a timezone in the JDBC URL.
4. If you use an RFC 3339 timestamp (2013-04-12T15:52:01.123+08:00), or an ISO 8601 timestamp (2013-04-12T15:52:01.123+0800), the timezone specified in the timestamp is used instead of the timestamps configured using any other method.
-### 16. Which network ports are required by TDengine?
+### 11. Which network ports are required by TDengine?
See [serverPort](https://docs.tdengine.com/reference/config/#serverport) in Configuration Parameters.
Note that ports are specified using 6030 as the default first port. If you change this port, all other ports change as well.
-### 17. Why do applications such as Grafana fail to connect to TDengine over the REST API?
+### 12. Why do applications such as Grafana fail to connect to TDengine over the REST API?
In TDengine, the REST API is provided by taosAdapter. Ensure that taosAdapter is running before you connect an application to TDengine over the REST API. You can run `systemctl start taosadapter` to start the service.
@@ -158,7 +158,7 @@ Note that the log path for taosAdapter must be configured separately. The defaul
For more information, see [taosAdapter](https://docs.tdengine.com/reference/taosadapter/).
-### 18. How can I resolve out-of-memory (OOM) errors?
+### 13. How can I resolve out-of-memory (OOM) errors?
OOM errors are thrown by the operating system when its memory, including swap, becomes insufficient and it needs to terminate processes to remain operational. Most OOM errors in TDengine occur for one of the following reasons: free memory is less than the value of `vm.min_free_kbytes` or free memory is less than the size of the request. If TDengine occupies reserved memory, an OOM error can occur even when free memory is sufficient.
diff --git a/docs/en/28-releases/01-tdengine.md b/docs/en/28-releases/01-tdengine.md
index 9b83c5fd65a5aa364892b608ffb8b25bd56f66b4..83ea3eb5e6d3edd69d65774a9761324a8f77dcd9 100644
--- a/docs/en/28-releases/01-tdengine.md
+++ b/docs/en/28-releases/01-tdengine.md
@@ -10,6 +10,14 @@ For TDengine 2.x installation packages by version, please visit [here](https://w
import Release from "/components/ReleaseV3";
+## 3.0.2.4
+
+<Release type="tdengine" version="3.0.2.4" />
+
+## 3.0.2.3
+
+<Release type="tdengine" version="3.0.2.3" />
+
## 3.0.2.2
diff --git a/docs/en/28-releases/02-tools.md b/docs/en/28-releases/02-tools.md
index dd44e43ab63a7ea227665706f7f7e593a3b81182..97fed654f2f25c782a036501c39204be5750d93e 100644
--- a/docs/en/28-releases/02-tools.md
+++ b/docs/en/28-releases/02-tools.md
@@ -10,6 +10,14 @@ For other historical version installers, please visit [here](https://www.taosdat
import Release from "/components/ReleaseV3";
+## 2.4.2
+
+<Release type="tools" version="2.4.2" />
+
+## 2.4.1
+
+<Release type="tools" version="2.4.1" />
+
## 2.4.0
diff --git a/docs/examples/csharp/asyncQuery/asyncquery.csproj b/docs/examples/csharp/asyncQuery/asyncquery.csproj
index 23e590cd25aa88e58cabf81717a6baf320f447bc..7c5b693f28dfa8832ae08bbaae9aa8a367951c70 100644
--- a/docs/examples/csharp/asyncQuery/asyncquery.csproj
+++ b/docs/examples/csharp/asyncQuery/asyncquery.csproj
@@ -9,7 +9,7 @@
-
+
diff --git a/docs/examples/csharp/connect/connect.csproj b/docs/examples/csharp/connect/connect.csproj
index 3a912f8987ace6ae540726886d901c8d32a7b81b..a08e86d4b42199be44a6551e37da11efb6e06a34 100644
--- a/docs/examples/csharp/connect/connect.csproj
+++ b/docs/examples/csharp/connect/connect.csproj
@@ -9,7 +9,7 @@
-
+
diff --git a/docs/examples/csharp/influxdbLine/influxdbline.csproj b/docs/examples/csharp/influxdbLine/influxdbline.csproj
index 58bca485088e409fe1d387c6020418bbc2bf871b..4889f8fde9dc0eb75c0547e32355929d1cceb138 100644
--- a/docs/examples/csharp/influxdbLine/influxdbline.csproj
+++ b/docs/examples/csharp/influxdbLine/influxdbline.csproj
@@ -9,7 +9,7 @@
-
+
diff --git a/docs/examples/csharp/optsJSON/optsJSON.csproj b/docs/examples/csharp/optsJSON/optsJSON.csproj
index da16025dcd45f8e5c4ba6e242524c2e56191e93c..208f04c82d19f83f2746871b64a6dfdf0dcf3eae 100644
--- a/docs/examples/csharp/optsJSON/optsJSON.csproj
+++ b/docs/examples/csharp/optsJSON/optsJSON.csproj
@@ -9,7 +9,7 @@
-
+
diff --git a/docs/examples/csharp/optsTelnet/optstelnet.csproj b/docs/examples/csharp/optsTelnet/optstelnet.csproj
index 194de21bcc74653a2267b29681ece6243fd401fc..32c76ec4184b82e943897a36bc3bcbbd9ec85149 100644
--- a/docs/examples/csharp/optsTelnet/optstelnet.csproj
+++ b/docs/examples/csharp/optsTelnet/optstelnet.csproj
@@ -9,7 +9,7 @@
-
+
diff --git a/docs/examples/csharp/query/query.csproj b/docs/examples/csharp/query/query.csproj
index c97dbd3051e1a415b192e73d6753266b0b41b07d..360d73b2c096ef86df59876d0629fd0c4b6a239b 100644
--- a/docs/examples/csharp/query/query.csproj
+++ b/docs/examples/csharp/query/query.csproj
@@ -9,7 +9,7 @@
-
+
diff --git a/docs/examples/csharp/sqlInsert/sqlinsert.csproj b/docs/examples/csharp/sqlInsert/sqlinsert.csproj
index ab0e5e717a78faad07c949b434b0d0b8a26c7211..1b6f745c82437e9796da4c48fc720600dbe99cb5 100644
--- a/docs/examples/csharp/sqlInsert/sqlinsert.csproj
+++ b/docs/examples/csharp/sqlInsert/sqlinsert.csproj
@@ -9,7 +9,7 @@
-
+
diff --git a/docs/examples/csharp/stmtInsert/Program.cs b/docs/examples/csharp/stmtInsert/Program.cs
index 87e1971feb8499c515206f05a1e916070ac57f4c..80cadb2ff8b596a0484d05ff15aeaa50f22ff859 100644
--- a/docs/examples/csharp/stmtInsert/Program.cs
+++ b/docs/examples/csharp/stmtInsert/Program.cs
@@ -42,7 +42,7 @@ namespace TDengineExample
// 5. execute
res = TDengine.StmtExecute(stmt);
- CheckStmtRes(res, "faild to execute");
+ CheckStmtRes(res, "failed to execute");
// 6. free
TaosMultiBind.FreeTaosBind(tags);
@@ -92,7 +92,7 @@ namespace TDengineExample
int code = TDengine.StmtClose(stmt);
if (code != 0)
{
- throw new Exception($"falied to close stmt, {code} reason: {TDengine.StmtErrorStr(stmt)} ");
+ throw new Exception($"failed to close stmt, {code} reason: {TDengine.StmtErrorStr(stmt)} ");
}
}
}
diff --git a/docs/examples/csharp/stmtInsert/stmtinsert.csproj b/docs/examples/csharp/stmtInsert/stmtinsert.csproj
index 3d459fbeda02ab03dc40dac2ecae290724cccbcc..f5b2b673971c3822e6f6c9b65b8f02bc9d4dc80e 100644
--- a/docs/examples/csharp/stmtInsert/stmtinsert.csproj
+++ b/docs/examples/csharp/stmtInsert/stmtinsert.csproj
@@ -9,7 +9,7 @@
-
+
diff --git a/docs/examples/csharp/subscribe/subscribe.csproj b/docs/examples/csharp/subscribe/subscribe.csproj
index 8ae1cf6bc6023558c28797a0d9fcccb2f2e87653..191b3f9e9bb07dc72c9bb452ad19e30e42af922a 100644
--- a/docs/examples/csharp/subscribe/subscribe.csproj
+++ b/docs/examples/csharp/subscribe/subscribe.csproj
@@ -9,7 +9,7 @@
-
+
diff --git a/docs/examples/csharp/wsConnect/wsConnect.csproj b/docs/examples/csharp/wsConnect/wsConnect.csproj
index 34951dc761903e5a4b7a4bec5dfe55a965ab88be..6d78be6e7a112475523d449b1ba308923bf13add 100644
--- a/docs/examples/csharp/wsConnect/wsConnect.csproj
+++ b/docs/examples/csharp/wsConnect/wsConnect.csproj
@@ -3,11 +3,16 @@
Exenet5.0
- enable
-
+
+
+
+
+
+
+
diff --git a/docs/examples/csharp/wsInsert/wsInsert.csproj b/docs/examples/csharp/wsInsert/wsInsert.csproj
index 34951dc761903e5a4b7a4bec5dfe55a965ab88be..95bfbdea3df6d1f8047c082a31a43dad958edce0 100644
--- a/docs/examples/csharp/wsInsert/wsInsert.csproj
+++ b/docs/examples/csharp/wsInsert/wsInsert.csproj
@@ -5,9 +5,13 @@
net5.0enable
-
-
+
-
+
+
+
+
+
+
diff --git a/docs/examples/csharp/wsQuery/wsQuery.csproj b/docs/examples/csharp/wsQuery/wsQuery.csproj
index 34951dc761903e5a4b7a4bec5dfe55a965ab88be..e5c2cf767cf4a427d11a72298e932940706fb2f4 100644
--- a/docs/examples/csharp/wsQuery/wsQuery.csproj
+++ b/docs/examples/csharp/wsQuery/wsQuery.csproj
@@ -7,7 +7,13 @@
-
+
+
+
+
+
+
+
diff --git a/docs/examples/csharp/wsStmt/wsStmt.csproj b/docs/examples/csharp/wsStmt/wsStmt.csproj
index 34951dc761903e5a4b7a4bec5dfe55a965ab88be..e5c2cf767cf4a427d11a72298e932940706fb2f4 100644
--- a/docs/examples/csharp/wsStmt/wsStmt.csproj
+++ b/docs/examples/csharp/wsStmt/wsStmt.csproj
@@ -7,7 +7,13 @@
-
+
+
+
+
+
+
+
diff --git a/docs/examples/go/go.mod b/docs/examples/go/go.mod
index 2bc1a74cb6ef14221fa384701773dc73fe3b161d..716a0ef5dc91b4d3598c8af304204edb99e9b584 100644
--- a/docs/examples/go/go.mod
+++ b/docs/examples/go/go.mod
@@ -2,5 +2,5 @@ module goexample
go 1.17
-require github.com/taosdata/driver-go/v3 3.0
+require github.com/taosdata/driver-go/v3 v3.1.0
diff --git a/docs/examples/go/go.sum b/docs/examples/go/go.sum
new file mode 100644
index 0000000000000000000000000000000000000000..13e13adaa189053696320a6eb9740daa319a98b7
--- /dev/null
+++ b/docs/examples/go/go.sum
@@ -0,0 +1,15 @@
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/taosdata/driver-go/v3 v3.1.0/go.mod h1:H2vo/At+rOPY1aMzUV9P49SVX7NlXb3LAbKw+MCLrmU=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/docs/examples/go/sub/main.go b/docs/examples/go/sub/main.go
index a13d394a2c5009c1ad88684109b6f16b4d8a0540..1f7218936fbe457615562ded1b938daca95225cb 100644
--- a/docs/examples/go/sub/main.go
+++ b/docs/examples/go/sub/main.go
@@ -1,17 +1,12 @@
package main
import (
- "context"
- "encoding/json"
"fmt"
- "strconv"
- "time"
+ "os"
"github.com/taosdata/driver-go/v3/af"
"github.com/taosdata/driver-go/v3/af/tmq"
- "github.com/taosdata/driver-go/v3/common"
- "github.com/taosdata/driver-go/v3/errors"
- "github.com/taosdata/driver-go/v3/wrapper"
+ tmqcommon "github.com/taosdata/driver-go/v3/common/tmq"
)
func main() {
@@ -28,79 +23,56 @@ func main() {
if err != nil {
panic(err)
}
- config := tmq.NewConfig()
- defer config.Destroy()
- err = config.SetGroupID("test")
if err != nil {
panic(err)
}
- err = config.SetAutoOffsetReset("earliest")
- if err != nil {
- panic(err)
- }
- err = config.SetConnectIP("127.0.0.1")
- if err != nil {
- panic(err)
- }
- err = config.SetConnectUser("root")
- if err != nil {
- panic(err)
- }
- err = config.SetConnectPass("taosdata")
+ consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{
+ "group.id": "test",
+ "auto.offset.reset": "earliest",
+ "td.connect.ip": "127.0.0.1",
+ "td.connect.user": "root",
+ "td.connect.pass": "taosdata",
+ "td.connect.port": "6030",
+ "client.id": "test_tmq_client",
+ "enable.auto.commit": "false",
+ "enable.heartbeat.background": "true",
+ "experimental.snapshot.enable": "true",
+ "msg.with.table.name": "true",
+ })
if err != nil {
panic(err)
}
- err = config.SetConnectPort("6030")
+ err = consumer.Subscribe("example_tmq_topic", nil)
if err != nil {
panic(err)
}
- err = config.SetMsgWithTableName(true)
+ _, err = db.Exec("create table example_tmq.t1 (ts timestamp,v int)")
if err != nil {
panic(err)
}
- err = config.EnableHeartBeat()
+ _, err = db.Exec("insert into example_tmq.t1 values(now,1)")
if err != nil {
panic(err)
}
- err = config.EnableAutoCommit(func(result *wrapper.TMQCommitCallbackResult) {
- if result.ErrCode != 0 {
- errStr := wrapper.TMQErr2Str(result.ErrCode)
- err := errors.NewError(int(result.ErrCode), errStr)
- panic(err)
+ for i := 0; i < 5; i++ {
+ ev := consumer.Poll(0)
+ if ev != nil {
+ switch e := ev.(type) {
+ case *tmqcommon.DataMessage:
+ fmt.Println(e.String())
+ case tmqcommon.Error:
+ fmt.Fprintf(os.Stderr, "%% Error: %v: %v\n", e.Code(), e)
+ panic(e)
+ }
+ consumer.Commit()
}
- })
- if err != nil {
- panic(err)
}
- consumer, err := tmq.NewConsumer(config)
+ err = consumer.Unsubscribe()
if err != nil {
panic(err)
}
- err = consumer.Subscribe([]string{"example_tmq_topic"})
+ err = consumer.Close()
if err != nil {
panic(err)
}
- _, err = db.Exec("create table example_tmq.t1 (ts timestamp,v int)")
- if err != nil {
- panic(err)
- }
- _, err = db.Exec("insert into example_tmq.t1 values(now,1)")
- if err != nil {
- panic(err)
- }
- for {
- result, err := consumer.Poll(time.Second)
- if err != nil {
- panic(err)
- }
- if result.Type != common.TMQ_RES_DATA {
- panic("want message type 1 got " + strconv.Itoa(int(result.Type)))
- }
- data, _ := json.Marshal(result.Data)
- fmt.Println(string(data))
- consumer.Commit(context.Background(), result.Message)
- consumer.FreeMessage(result.Message)
- break
- }
- consumer.Close()
}
diff --git a/docs/examples/python/conn_native_pandas.py b/docs/examples/python/conn_native_pandas.py
index 56942ef57085766cd128b03cabb7a357587eab16..f3bab15efbe6669a88828fb194682dbfedb382df 100644
--- a/docs/examples/python/conn_native_pandas.py
+++ b/docs/examples/python/conn_native_pandas.py
@@ -1,8 +1,11 @@
import pandas
-from sqlalchemy import create_engine
+from sqlalchemy import create_engine, text
engine = create_engine("taos://root:taosdata@localhost:6030/power")
-df = pandas.read_sql("SELECT * FROM meters", engine)
+conn = engine.connect()
+df = pandas.read_sql(text("SELECT * FROM power.meters"), conn)
+conn.close()
+
# print index
print(df.index)
diff --git a/docs/examples/python/conn_rest_pandas.py b/docs/examples/python/conn_rest_pandas.py
index 0164080cd5a05e72dce40b1d111ea423623ff9b2..1b207d6ff10a353f3473116ce807cc8daf362ca7 100644
--- a/docs/examples/python/conn_rest_pandas.py
+++ b/docs/examples/python/conn_rest_pandas.py
@@ -1,8 +1,10 @@
import pandas
-from sqlalchemy import create_engine
+from sqlalchemy import create_engine, text
engine = create_engine("taosrest://root:taosdata@localhost:6041")
-df: pandas.DataFrame = pandas.read_sql("SELECT * FROM power.meters", engine)
+conn = engine.connect()
+df: pandas.DataFrame = pandas.read_sql(text("SELECT * FROM power.meters"), conn)
+conn.close()
# print index
print(df.index)
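Both pandas hunks above make the same SQLAlchemy 2.0 migration: an `Engine` no longer executes plain SQL strings directly, so the query is wrapped in `text()` and run on an explicitly opened `Connection`. An equivalent sketch using a context manager, which also closes the connection if `read_sql` raises (same connection URL as the REST example above):

```python
import pandas
from sqlalchemy import create_engine, text

engine = create_engine("taosrest://root:taosdata@localhost:6041")

# "with" guarantees conn.close() even on error;
# the examples above call conn.close() by hand instead.
with engine.connect() as conn:
    df = pandas.read_sql(text("SELECT * FROM power.meters"), conn)

print(df.index)
```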
diff --git a/docs/examples/python/connect_rest_examples.py b/docs/examples/python/connect_rest_examples.py
index 900ec1022ec81ac2db761d918d1ec11c9bb26852..0f8625ae5387a275f7b84948ad80191b8e443862 100644
--- a/docs/examples/python/connect_rest_examples.py
+++ b/docs/examples/python/connect_rest_examples.py
@@ -1,24 +1,25 @@
# ANCHOR: connect
from taosrest import connect, TaosRestConnection, TaosRestCursor
-conn: TaosRestConnection = connect(url="http://localhost:6041",
- user="root",
- password="taosdata",
- timeout=30)
+conn = connect(url="http://localhost:6041",
+ user="root",
+ password="taosdata",
+ timeout=30)
# ANCHOR_END: connect
# ANCHOR: basic
# create STable
-cursor: TaosRestCursor = conn.cursor()
+cursor = conn.cursor()
cursor.execute("DROP DATABASE IF EXISTS power")
cursor.execute("CREATE DATABASE power")
-cursor.execute("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)")
+cursor.execute(
+ "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)")
# insert data
-cursor.execute("""INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
- power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
- power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
- power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)""")
+cursor.execute("""INSERT INTO power.d1001 USING power.meters TAGS('California.SanFrancisco', 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
+ power.d1002 USING power.meters TAGS('California.SanFrancisco', 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
+ power.d1003 USING power.meters TAGS('California.LosAngeles', 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
+ power.d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)""")
print("inserted row count:", cursor.rowcount)
# query data
@@ -28,7 +29,7 @@ print("queried row count:", cursor.rowcount)
# get column names from cursor
column_names = [meta[0] for meta in cursor.description]
# get rows
-data: list[tuple] = cursor.fetchall()
+data = cursor.fetchall()
print(column_names)
for row in data:
print(row)
diff --git a/docs/examples/python/connection_usage_native_reference.py b/docs/examples/python/connection_usage_native_reference.py
index a7179b4cf859eb440b535a797eeb8e2be1e33589..8b754ec7226e8fd25dbdeb27b28faebdcf612049 100644
--- a/docs/examples/python/connection_usage_native_reference.py
+++ b/docs/examples/python/connection_usage_native_reference.py
@@ -8,7 +8,7 @@ conn.execute("CREATE DATABASE test")
# change database. same as execute "USE db"
conn.select_db("test")
conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (location INT)")
-affected_row: int = conn.execute("INSERT INTO t1 USING weather TAGS(1) VALUES (now, 23.5) (now+1m, 23.5) (now+2m, 24.4)")
+affected_row = conn.execute("INSERT INTO t1 USING weather TAGS(1) VALUES (now, 23.5) (now+1m, 23.5) (now+2m, 24.4)")
print("affected_row", affected_row)
# output:
# affected_row 3
@@ -16,10 +16,10 @@ print("affected_row", affected_row)
# ANCHOR: query
# Execute a sql and get its result set. It's useful for SELECT statement
-result: taos.TaosResult = conn.query("SELECT * from weather")
+result = conn.query("SELECT * from weather")
# Get fields from result
-fields: taos.field.TaosFields = result.fields
+fields = result.fields
for field in fields:
print(field) # {name: ts, type: 9, bytes: 8}
diff --git a/docs/examples/python/fast_write_example.py b/docs/examples/python/fast_write_example.py
index c9d606388fdecd85f1468f24cc497ecc5941f035..626e3310b120b9415952614b4b110ed29f787582 100644
--- a/docs/examples/python/fast_write_example.py
+++ b/docs/examples/python/fast_write_example.py
@@ -1,15 +1,14 @@
# install dependencies:
# recommend python >= 3.8
-# pip3 install faster-fifo
#
import logging
import math
+import multiprocessing
import sys
import time
import os
-from multiprocessing import Process
-from faster_fifo import Queue
+from multiprocessing import Process, Queue
from mockdatasource import MockDataSource
from queue import Empty
from typing import List
@@ -22,8 +21,7 @@ TABLE_COUNT = 1000
QUEUE_SIZE = 1000000
MAX_BATCH_SIZE = 3000
-read_processes = []
-write_processes = []
+_DONE_MESSAGE = '__DONE__'
def get_connection():
@@ -44,41 +42,64 @@ def get_connection():
# ANCHOR: read
-def run_read_task(task_id: int, task_queues: List[Queue]):
+def run_read_task(task_id: int, task_queues: List[Queue], infinity):
table_count_per_task = TABLE_COUNT // READ_TASK_COUNT
- data_source = MockDataSource(f"tb{task_id}", table_count_per_task)
+ data_source = MockDataSource(f"tb{task_id}", table_count_per_task, infinity)
try:
for batch in data_source:
+ if isinstance(batch, tuple):
+ batch = [batch]
for table_id, rows in batch:
# hash data to different queue
i = table_id % len(task_queues)
# block putting forever when the queue is full
- task_queues[i].put_many(rows, block=True, timeout=-1)
+ for row in rows:
+ task_queues[i].put(row)
+ if not infinity:
+ for queue in task_queues:
+ queue.put(_DONE_MESSAGE)
except KeyboardInterrupt:
pass
+ finally:
+ logging.info('read task over')
# ANCHOR_END: read
+
# ANCHOR: write
-def run_write_task(task_id: int, queue: Queue):
+def run_write_task(task_id: int, queue: Queue, done_queue: Queue):
from sql_writer import SQLWriter
log = logging.getLogger(f"WriteTask-{task_id}")
writer = SQLWriter(get_connection)
lines = None
try:
while True:
- try:
- # get as many as possible
- lines = queue.get_many(block=False, max_messages_to_get=MAX_BATCH_SIZE)
+ over = False
+ lines = []
+ for _ in range(MAX_BATCH_SIZE):
+ try:
+ line = queue.get_nowait()
+ if line == _DONE_MESSAGE:
+ over = True
+ break
+ if line:
+ lines.append(line)
+ except Empty:
+ time.sleep(0.1)
+ if len(lines) > 0:
writer.process_lines(lines)
- except Empty:
- time.sleep(0.01)
+ if over:
+ done_queue.put(_DONE_MESSAGE)
+ break
except KeyboardInterrupt:
pass
except BaseException as e:
log.debug(f"lines={lines}")
raise e
+ finally:
+ writer.close()
+ log.debug('write task over')
# ANCHOR_END: write
@@ -103,47 +124,64 @@ def set_global_config():
# ANCHOR: monitor
-def run_monitor_process():
+def run_monitor_process(done_queue: Queue):
log = logging.getLogger("DataBaseMonitor")
- conn = get_connection()
- conn.execute("DROP DATABASE IF EXISTS test")
- conn.execute("CREATE DATABASE test")
- conn.execute("CREATE STABLE test.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) "
- "TAGS (location BINARY(64), groupId INT)")
+ conn = None
+ try:
+ conn = get_connection()
- def get_count():
- res = conn.query("SELECT count(*) FROM test.meters")
- rows = res.fetch_all()
- return rows[0][0] if rows else 0
+ def get_count():
+ res = conn.query("SELECT count(*) FROM test.meters")
+ rows = res.fetch_all()
+ return rows[0][0] if rows else 0
- last_count = 0
- while True:
- time.sleep(10)
- count = get_count()
- log.info(f"count={count} speed={(count - last_count) / 10}")
- last_count = count
+ last_count = 0
+ while True:
+ try:
+ done = done_queue.get_nowait()
+ if done == _DONE_MESSAGE:
+ break
+ except Empty:
+ pass
+ time.sleep(10)
+ count = get_count()
+ log.info(f"count={count} speed={(count - last_count) / 10}")
+ last_count = count
+ finally:
+ conn.close()
# ANCHOR_END: monitor
# ANCHOR: main
-def main():
+def main(infinity):
set_global_config()
logging.info(f"READ_TASK_COUNT={READ_TASK_COUNT}, WRITE_TASK_COUNT={WRITE_TASK_COUNT}, "
f"TABLE_COUNT={TABLE_COUNT}, QUEUE_SIZE={QUEUE_SIZE}, MAX_BATCH_SIZE={MAX_BATCH_SIZE}")
- monitor_process = Process(target=run_monitor_process)
+ conn = get_connection()
+ conn.execute("DROP DATABASE IF EXISTS test")
+ conn.execute("CREATE DATABASE IF NOT EXISTS test")
+ conn.execute("CREATE STABLE IF NOT EXISTS test.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) "
+ "TAGS (location BINARY(64), groupId INT)")
+ conn.close()
+
+ done_queue = Queue()
+ monitor_process = Process(target=run_monitor_process, args=(done_queue,))
monitor_process.start()
- time.sleep(3) # waiting for database ready.
+ logging.debug(f"monitor task started with pid {monitor_process.pid}")
task_queues: List[Queue] = []
+ write_processes = []
+ read_processes = []
+
# create task queues
for i in range(WRITE_TASK_COUNT):
- queue = Queue(max_size_bytes=QUEUE_SIZE)
+ queue = Queue()
task_queues.append(queue)
# create write processes
for i in range(WRITE_TASK_COUNT):
- p = Process(target=run_write_task, args=(i, task_queues[i]))
+ p = Process(target=run_write_task, args=(i, task_queues[i], done_queue))
p.start()
logging.debug(f"WriteTask-{i} started with pid {p.pid}")
write_processes.append(p)
@@ -151,13 +189,19 @@ def main():
# create read processes
for i in range(READ_TASK_COUNT):
queues = assign_queues(i, task_queues)
- p = Process(target=run_read_task, args=(i, queues))
+ p = Process(target=run_read_task, args=(i, queues, infinity))
p.start()
logging.debug(f"ReadTask-{i} started with pid {p.pid}")
read_processes.append(p)
try:
monitor_process.join()
+ for p in read_processes:
+ p.join()
+ for p in write_processes:
+ p.join()
+ time.sleep(1)
+ return
except KeyboardInterrupt:
monitor_process.terminate()
[p.terminate() for p in read_processes]
@@ -176,5 +220,6 @@ def assign_queues(read_task_id, task_queues):
if __name__ == '__main__':
- main()
+ multiprocessing.set_start_method('spawn')
+ main(False)
# ANCHOR_END: main
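The rewrite above removes the third-party `faster-fifo` dependency in favor of the standard-library `multiprocessing.Queue`. Since the standard queue has no `get_many()`, the write task drains up to `MAX_BATCH_SIZE` items with `get_nowait()`, and a `__DONE__` sentinel pushed by the readers tells each writer when to stop. The shutdown pattern in isolation looks roughly like this (toy data; names shortened from the example):

```python
import multiprocessing
import time
from queue import Empty

_DONE_MESSAGE = "__DONE__"

def read_task(q):
    for row in ["row1", "row2", "row3"]:  # stand-in for MockDataSource output
        q.put(row)
    q.put(_DONE_MESSAGE)  # signal the writer that no more data is coming

def write_task(q):
    while True:
        lines, over = [], False
        for _ in range(1000):  # drain up to one batch
            try:
                line = q.get_nowait()
            except Empty:
                time.sleep(0.1)
                break
            if line == _DONE_MESSAGE:
                over = True
                break
            lines.append(line)
        if lines:
            print("write batch:", lines)
        if over:
            break

if __name__ == "__main__":
    multiprocessing.set_start_method("spawn")
    q = multiprocessing.Queue()
    w = multiprocessing.Process(target=write_task, args=(q,))
    w.start()
    read_task(q)
    w.join()
```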
diff --git a/docs/examples/python/kafka_example.py b/docs/examples/python/kafka_example.py
index 735059eec0f3dcf5094810916e66a39db5682560..43f9183f7e25b680827aef836363ef5f0549468b 100644
--- a/docs/examples/python/kafka_example.py
+++ b/docs/examples/python/kafka_example.py
@@ -26,7 +26,8 @@ class Consumer(object):
'bath_consume': True,
'batch_size': 1000,
'async_model': True,
- 'workers': 10
+ 'workers': 10,
+ 'testing': False
}
LOCATIONS = ['California.SanFrancisco', 'California.LosAngles', 'California.SanDiego', 'California.SanJose',
@@ -46,11 +47,12 @@ class Consumer(object):
def __init__(self, **configs):
self.config: dict = self.DEFAULT_CONFIGS
self.config.update(configs)
- self.consumer = KafkaConsumer(
- self.config.get('kafka_topic'), # topic
- bootstrap_servers=self.config.get('kafka_brokers'),
- group_id=self.config.get('kafka_group_id'),
- )
+ if not self.config.get('testing'):
+ self.consumer = KafkaConsumer(
+ self.config.get('kafka_topic'), # topic
+ bootstrap_servers=self.config.get('kafka_brokers'),
+ group_id=self.config.get('kafka_group_id'),
+ )
self.taos = taos.connect(
host=self.config.get('taos_host'),
user=self.config.get('taos_user'),
@@ -60,7 +62,7 @@ class Consumer(object):
)
if self.config.get('async_model'):
self.pool = ThreadPoolExecutor(max_workers=self.config.get('workers'))
- self.tasks: list[Future] = []
+ self.tasks = []
# tags and table mapping # key: {location}_{groupId} value:
self.tag_table_mapping = {}
i = 0
@@ -115,14 +117,14 @@ class Consumer(object):
if self.taos is not None:
self.taos.close()
- def _run(self, f: Callable[[ConsumerRecord], bool]):
+ def _run(self, f):
for message in self.consumer:
if self.config.get('async_model'):
self.pool.submit(f(message))
else:
f(message)
- def _run_batch(self, f: Callable[[list[list[ConsumerRecord]]], None]):
+ def _run_batch(self, f):
while True:
messages = self.consumer.poll(timeout_ms=500, max_records=self.config.get('batch_size'))
if messages:
@@ -140,7 +142,7 @@ class Consumer(object):
logging.info('## insert sql %s', sql)
return self.taos.execute(sql=sql) == 1
- def _to_taos_batch(self, messages: list[list[ConsumerRecord]]):
+ def _to_taos_batch(self, messages):
sql = self._build_sql_batch(messages=messages)
if len(sql) == 0: # decode error, skip
return
@@ -162,7 +164,7 @@ class Consumer(object):
table_name = self._get_table_name(location=location, group_id=group_id)
return self.INSERT_PART_SQL.format(table_name, ts, current, voltage, phase)
- def _build_sql_batch(self, messages: list[list[ConsumerRecord]]) -> str:
+ def _build_sql_batch(self, messages) -> str:
sql_list = []
for partition_messages in messages:
for message in partition_messages:
@@ -186,7 +188,54 @@ def _get_location_and_group(key: str) -> (str, int):
return fields[0], fields[1]
+def test_to_taos(consumer: Consumer):
+ msg = {
+ 'location': 'California.SanFrancisco',
+ 'groupId': 1,
+ 'ts': '2022-12-06 15:13:38.643',
+ 'current': 3.41,
+ 'voltage': 105,
+ 'phase': 0.02027,
+ }
+ record = ConsumerRecord(checksum=None, headers=None, offset=1, key=None, value=json.dumps(msg), partition=1,
+ topic='test', serialized_key_size=None, serialized_header_size=None,
+ serialized_value_size=None, timestamp=time.time(), timestamp_type=None)
+ assert consumer._to_taos(message=record)
+
+
+def test_to_taos_batch(consumer: Consumer):
+ records = [
+ [
+ ConsumerRecord(checksum=None, headers=None, offset=1, key=None,
+ value=json.dumps({'location': 'California.SanFrancisco',
+ 'groupId': 1,
+ 'ts': '2022-12-06 15:13:38.643',
+ 'current': 3.41,
+ 'voltage': 105,
+ 'phase': 0.02027, }),
+ partition=1, topic='test', serialized_key_size=None, serialized_header_size=None,
+ serialized_value_size=None, timestamp=time.time(), timestamp_type=None),
+ ConsumerRecord(checksum=None, headers=None, offset=1, key=None,
+ value=json.dumps({'location': 'California.LosAngles',
+ 'groupId': 2,
+ 'ts': '2022-12-06 15:13:39.643',
+ 'current': 3.41,
+ 'voltage': 102,
+ 'phase': 0.02027, }),
+ partition=1, topic='test', serialized_key_size=None, serialized_header_size=None,
+ serialized_value_size=None, timestamp=time.time(), timestamp_type=None),
+ ]
+ ]
+
+ consumer._to_taos_batch(messages=records)
+
+
if __name__ == '__main__':
- consumer = Consumer(async_model=True)
+ consumer = Consumer(async_model=True, testing=True)
+ # init env
consumer.init_env()
- consumer.consume()
\ No newline at end of file
+ # consumer.consume()
+ # test build sql
+ # test build sql batch
+ test_to_taos(consumer)
+ test_to_taos_batch(consumer)
diff --git a/docs/examples/python/mockdatasource.py b/docs/examples/python/mockdatasource.py
index 1c516a800e007934f8e6815f82024a53fea70073..9c702936ea6f1bdff3f604d376fd1925b4dc118e 100644
--- a/docs/examples/python/mockdatasource.py
+++ b/docs/examples/python/mockdatasource.py
@@ -10,13 +10,14 @@ class MockDataSource:
"9.4,118,0.141,California.SanFrancisco,4"
]
- def __init__(self, tb_name_prefix, table_count):
+ def __init__(self, tb_name_prefix, table_count, infinity=True):
self.table_name_prefix = tb_name_prefix + "_"
self.table_count = table_count
self.max_rows = 10000000
self.current_ts = round(time.time() * 1000) - self.max_rows * 100
# [(tableId, tableName, values),]
self.data = self._init_data()
+ self.infinity = infinity
def _init_data(self):
lines = self.samples * (self.table_count // 5 + 1)
@@ -28,14 +29,19 @@ class MockDataSource:
def __iter__(self):
self.row = 0
- return self
+ if not self.infinity:
+ return iter(self._iter_data())
+ else:
+ return self
def __next__(self):
"""
next 1000 rows for each table.
return: {tableId:[row,...]}
"""
- # generate 1000 timestamps
+ return self._iter_data()
+
+ def _iter_data(self):
ts = []
for _ in range(1000):
self.current_ts += 100
@@ -47,3 +53,9 @@ class MockDataSource:
rows = [table_name + ',' + t + ',' + values for t in ts]
result.append((table_id, rows))
return result
+
+
+if __name__ == '__main__':
+ datasource = MockDataSource('t', 10, False)
+ for data in datasource:
+ print(data)
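One subtlety in the `infinity=False` path: `__iter__` returns `iter(self._iter_data())`, and `_iter_data()` returns a list of `(table_id, rows)` tuples, so a `for` loop yields one tuple at a time, whereas the infinite path's `__next__` yields the whole list per call. That is why `run_read_task` in the fast-write example normalizes a tuple batch back into a list. A toy illustration with hypothetical classes:

```python
class Finite:
    def __iter__(self):
        # iterating a list yields its (table_id, rows) tuples one by one
        return iter([(0, ["r1"]), (1, ["r2"])])

class Infinite:
    def __iter__(self):
        return self

    def __next__(self):
        # each call yields a whole batch: a list of tuples
        return [(0, ["r1"]), (1, ["r2"])]

for batch in Finite():
    if isinstance(batch, tuple):  # same normalization as run_read_task
        batch = [batch]
    for table_id, rows in batch:
        print(table_id, rows)
```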
diff --git a/docs/examples/python/native_insert_example.py b/docs/examples/python/native_insert_example.py
index 94fd00a6e9d1dcd2119693c4b5c862d36c219a3d..cdde7d23d24d12e11c67b6c6acc0e0b089fb5335 100644
--- a/docs/examples/python/native_insert_example.py
+++ b/docs/examples/python/native_insert_example.py
@@ -25,10 +25,10 @@ def create_stable(conn: taos.TaosConnection):
# The generated SQL is:
-# INSERT INTO d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
-# d1002 USING meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
-# d1003 USING meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
-# d1004 USING meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)
+# INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
+# d1002 USING meters TAGS('California.SanFrancisco', 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
+# d1003 USING meters TAGS('California.LosAngeles', 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
+# d1004 USING meters TAGS('California.LosAngeles', 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)
def get_sql():
global lines
diff --git a/docs/examples/python/sql_writer.py b/docs/examples/python/sql_writer.py
index 758167376b009f21afc701be7d89c1bfbabdeb9f..3456981a7b9a174e38f8795ff7251ab3c675174b 100644
--- a/docs/examples/python/sql_writer.py
+++ b/docs/examples/python/sql_writer.py
@@ -10,6 +10,7 @@ class SQLWriter:
self._tb_tags = {}
self._conn = get_connection_func()
self._max_sql_length = self.get_max_sql_length()
+ self._conn.execute("create database if not exists test")
self._conn.execute("USE test")
def get_max_sql_length(self):
@@ -20,7 +21,7 @@ class SQLWriter:
return int(r[1])
return 1024 * 1024
- def process_lines(self, lines: str):
+ def process_lines(self, lines: [str]):
"""
:param lines: [[tbName,ts,current,voltage,phase,location,groupId]]
"""
@@ -60,6 +61,7 @@ class SQLWriter:
buf.append(q)
sql_len += len(q)
sql += " ".join(buf)
+ self.create_tables()
self.execute_sql(sql)
self._tb_values.clear()
@@ -88,3 +90,22 @@ class SQLWriter:
except BaseException as e:
self.log.error("Execute SQL: %s", sql)
raise e
+
+ def close(self):
+ if self._conn:
+ self._conn.close()
+
+
+if __name__ == '__main__':
+ def get_connection_func():
+ conn = taos.connect()
+ return conn
+
+
+ writer = SQLWriter(get_connection_func=get_connection_func)
+ writer.execute_sql(
+ "create stable if not exists meters (ts timestamp, current float, voltage int, phase float) "
+ "tags (location binary(64), groupId int)")
+ writer.execute_sql(
+ "INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) "
+ "VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32)")
diff --git a/docs/examples/python/tmq_example.py b/docs/examples/python/tmq_example.py
index a4625ca11accfbf7d263f4c1993f712987a136cb..6f7fb87c89ce4cb96793d09a837f60ad54ae69bc 100644
--- a/docs/examples/python/tmq_example.py
+++ b/docs/examples/python/tmq_example.py
@@ -1,58 +1,55 @@
+from taos.tmq import Consumer
import taos
-from taos.tmq import *
-conn = taos.connect()
-print("init")
-conn.execute("drop topic if exists topic_ctb_column")
-conn.execute("drop database if exists py_tmq")
-conn.execute("create database if not exists py_tmq vgroups 2")
-conn.select_db("py_tmq")
-conn.execute(
- "create stable if not exists stb1 (ts timestamp, c1 int, c2 float, c3 binary(10)) tags(t1 int)"
-)
-conn.execute("create table if not exists tb1 using stb1 tags(1)")
-conn.execute("create table if not exists tb2 using stb1 tags(2)")
-conn.execute("create table if not exists tb3 using stb1 tags(3)")
-
-print("create topic")
-conn.execute(
- "create topic if not exists topic_ctb_column as select ts, c1, c2, c3 from stb1"
-)
-
-print("build consumer")
-conf = TaosTmqConf()
-conf.set("group.id", "tg2")
-conf.set("td.connect.user", "root")
-conf.set("td.connect.pass", "taosdata")
-conf.set("enable.auto.commit", "true")
-
-
-def tmq_commit_cb_print(tmq, resp, offset, param=None):
- print(f"commit: {resp}, tmq: {tmq}, offset: {offset}, param: {param}")
-
-
-conf.set_auto_commit_cb(tmq_commit_cb_print, None)
-tmq = conf.new_consumer()
-
-print("build topic list")
-
-topic_list = TaosTmqList()
-topic_list.append("topic_ctb_column")
-
-print("basic consume loop")
-tmq.subscribe(topic_list)
-
-sub_list = tmq.subscription()
-
-print("subscribed topics: ", sub_list)
-
-while 1:
- res = tmq.poll(1000)
- if res:
- topic = res.get_topic_name()
- vg = res.get_vgroup_id()
- db = res.get_db_name()
- print(f"topic: {topic}\nvgroup id: {vg}\ndb: {db}")
- for row in res:
- print(row)
+def init_tmq_env(db, topic):
+ conn = taos.connect()
+ conn.execute("drop topic if exists {}".format(topic))
+ conn.execute("drop database if exists {}".format(db))
+ conn.execute("create database if not exists {}".format(db))
+ conn.select_db(db)
+ conn.execute(
+ "create stable if not exists stb1 (ts timestamp, c1 int, c2 float, c3 varchar(16)) tags(t1 int, t3 varchar(16))")
+ conn.execute("create table if not exists tb1 using stb1 tags(1, 't1')")
+ conn.execute("create table if not exists tb2 using stb1 tags(2, 't2')")
+ conn.execute("create table if not exists tb3 using stb1 tags(3, 't3')")
+ conn.execute("create topic if not exists {} as select ts, c1, c2, c3 from stb1".format(topic))
+ conn.execute("insert into tb1 values (now, 1, 1.0, 'tmq test')")
+ conn.execute("insert into tb2 values (now, 2, 2.0, 'tmq test')")
+ conn.execute("insert into tb3 values (now, 3, 3.0, 'tmq test')")
+
+
+def cleanup(db, topic):
+ conn = taos.connect()
+ conn.execute("drop topic if exists {}".format(topic))
+ conn.execute("drop database if exists {}".format(db))
+
+
+if __name__ == '__main__':
+ init_tmq_env("tmq_test", "tmq_test_topic") # init env
+ consumer = Consumer(
+ {
+ "group.id": "tg2",
+ "td.connect.user": "root",
+ "td.connect.pass": "taosdata",
+ "enable.auto.commit": "true",
+ }
+ )
+ consumer.subscribe(["tmq_test_topic"])
+
+ try:
+ while True:
+ res = consumer.poll(1)
+ if not res:
+ break
+ err = res.error()
+ if err is not None:
+ raise err
+ val = res.value()
+
+ for block in val:
+ print(block.fetchall())
+ finally:
+ consumer.unsubscribe()
+ consumer.close()
+ cleanup("tmq_test", "tmq_test_topic")
diff --git a/docs/examples/python/tmq_websocket_example.py b/docs/examples/python/tmq_websocket_example.py
new file mode 100644
index 0000000000000000000000000000000000000000..e1dcb0086a995c0c20a5d079ed6d8f4d18ea0356
--- /dev/null
+++ b/docs/examples/python/tmq_websocket_example.py
@@ -0,0 +1,31 @@
+#!/usr/bin/python3
+from taosws import Consumer
+
+conf = {
+ "td.connect.websocket.scheme": "ws",
+ "group.id": "0",
+}
+consumer = Consumer(conf)
+
+consumer.subscribe(["test"])
+
+while True:
+ message = consumer.poll(timeout=1.0)
+ if message:
+ id = message.vgroup()
+ topic = message.topic()
+ database = message.database()
+
+ for block in message:
+ nrows = block.nrows()
+ ncols = block.ncols()
+ for row in block:
+ print(row)
+ values = block.fetchall()
+ print(nrows, ncols)
+
+ # consumer.commit(message)
+ else:
+ break
+
+consumer.close()
diff --git a/docs/zh/05-get-started/index.md b/docs/zh/05-get-started/index.md
index 832310aa7c677940c7e4ca13be5f31c2d98a64dc..e144c563b97304f6257d3a1989d7caf85d3789aa 100644
--- a/docs/zh/05-get-started/index.md
+++ b/docs/zh/05-get-started/index.md
@@ -4,6 +4,7 @@ description: '快速设置 TDengine 环境并体验其高效写入和查询'
---
import xiaot from './xiaot.webp'
+import xiaot_new from './xiaot-new.webp'
import channel from './channel.webp'
import official_account from './official-account.webp'
@@ -35,13 +36,13 @@ TDengine 知识地图中涵盖了 TDengine 的各种知识点,揭示了各概