From d5c725552d582405483e760e7bfe07e79fcb4c62 Mon Sep 17 00:00:00 2001 From: dingbo8128 Date: Fri, 17 Feb 2023 14:52:06 +0800 Subject: [PATCH] refine cloud document 1. delete rest and sql from data-in and data-out 2. add sql to programming 3. remove schemaless from programmint/connect/rest 4. update taos sql document --- docs/en/07-data-in/01-insert-data.md | 106 -- docs/en/07-data-in/05-rest.md | 56 - docs/en/09-data-out/01-query-data.md | 244 ---- docs/en/09-data-out/02-rest.md | 56 - .../15-programming/01-connect/09-rest-api.md | 33 +- docs/en/15-programming/03-insert.md | 104 +- docs/en/15-programming/04-query.md | 241 +++- docs/en/15-programming/06-connector/index.md | 2 +- docs/en/17-taos-sql/01-data-type.md | 72 - docs/en/17-taos-sql/02-database.md | 156 --- docs/en/17-taos-sql/03-table.md | 198 --- docs/en/17-taos-sql/04-stable.md | 160 --- docs/en/17-taos-sql/05-insert.md | 132 -- docs/en/17-taos-sql/06-select.md | 414 ------ docs/en/17-taos-sql/08-delete-data.mdx | 41 - docs/en/17-taos-sql/10-function.md | 1238 ----------------- docs/en/17-taos-sql/12-distinguished.md | 156 --- docs/en/17-taos-sql/13-tmq.md | 60 - docs/en/17-taos-sql/14-stream.md | 100 -- docs/en/17-taos-sql/16-operators.md | 67 - docs/en/17-taos-sql/17-json.md | 92 -- docs/en/17-taos-sql/18-escape.md | 26 - docs/en/17-taos-sql/19-limit.md | 60 - docs/en/17-taos-sql/20-keywords.md | 277 ---- docs/en/17-taos-sql/26-udf.md | 67 - docs/en/17-taos-sql/27-index.md | 48 - docs/en/17-taos-sql/_category_.yml | 1 - docs/en/17-taos-sql/index.md | 40 - docs/en/17-taos-sql/timewindow-1.webp | Bin 4834 -> 0 bytes docs/en/17-taos-sql/timewindow-2.webp | Bin 4394 -> 0 bytes docs/en/17-taos-sql/timewindow-3.webp | Bin 19198 -> 0 bytes 31 files changed, 343 insertions(+), 3904 deletions(-) delete mode 100644 docs/en/07-data-in/01-insert-data.md delete mode 100644 docs/en/07-data-in/05-rest.md delete mode 100644 docs/en/09-data-out/01-query-data.md delete mode 100644 docs/en/09-data-out/02-rest.md delete 
mode 100644 docs/en/17-taos-sql/01-data-type.md delete mode 100644 docs/en/17-taos-sql/02-database.md delete mode 100644 docs/en/17-taos-sql/03-table.md delete mode 100644 docs/en/17-taos-sql/04-stable.md delete mode 100644 docs/en/17-taos-sql/05-insert.md delete mode 100644 docs/en/17-taos-sql/06-select.md delete mode 100644 docs/en/17-taos-sql/08-delete-data.mdx delete mode 100644 docs/en/17-taos-sql/10-function.md delete mode 100644 docs/en/17-taos-sql/12-distinguished.md delete mode 100644 docs/en/17-taos-sql/13-tmq.md delete mode 100644 docs/en/17-taos-sql/14-stream.md delete mode 100644 docs/en/17-taos-sql/16-operators.md delete mode 100644 docs/en/17-taos-sql/17-json.md delete mode 100644 docs/en/17-taos-sql/18-escape.md delete mode 100644 docs/en/17-taos-sql/19-limit.md delete mode 100644 docs/en/17-taos-sql/20-keywords.md delete mode 100644 docs/en/17-taos-sql/26-udf.md delete mode 100644 docs/en/17-taos-sql/27-index.md delete mode 100644 docs/en/17-taos-sql/_category_.yml delete mode 100644 docs/en/17-taos-sql/index.md delete mode 100644 docs/en/17-taos-sql/timewindow-1.webp delete mode 100644 docs/en/17-taos-sql/timewindow-2.webp delete mode 100644 docs/en/17-taos-sql/timewindow-3.webp diff --git a/docs/en/07-data-in/01-insert-data.md b/docs/en/07-data-in/01-insert-data.md deleted file mode 100644 index db55a2bd4d..0000000000 --- a/docs/en/07-data-in/01-insert-data.md +++ /dev/null @@ -1,106 +0,0 @@ ---- -sidebar_label: SQL -title: Insert Data Using SQL -description: Insert data using TDengine SQL ---- - -# Insert Data - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -## SQL Examples - -Here are some brief examples for `INSET` statement. You can execute these statements manually by TDengine CLI or TDengine Cloud Explorer or programmatically by TDengine connectors. - -### Insert Single Row - -The below SQL statement is used to insert one row into table "d1001". 
- -```sql -INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31); -``` - -### Insert Multiple Rows - -Multiple rows can be inserted in a single SQL statement. The example below inserts 2 rows into table "d1001". - -```sql -INSERT INTO d1001 VALUES (1538548684000, 10.2, 220, 0.23) (1538548696650, 10.3, 218, 0.25); -``` - -### Insert into Multiple Tables - -Data can be inserted into multiple tables in the same SQL statement. The example below inserts 2 rows into table "d1001" and 1 row into table "d1002". - -```sql -INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, 218, 0.33) d1002 VALUES (1538548696800, 12.3, 221, 0.31); -``` - -For more details about `INSERT` please refer to [INSERT](https://docs.tdengine.com/cloud/taos-sql/insert). - - -## Connector Examples - -:::note -Before executing the sample code in this section, you need to firstly establish connection to TDegnine cloud service, please refer to [Connect to TDengine Cloud Service](../../programming/connect/). - -::: - - - - -In this example, we use `execute` method to execute SQL and get affected rows. The variable `conn` is an instance of class `taosrest.TaosRestConnection` we just created at [Connect Tutorial](../../programming/connect/python#connect). - -```python -{{#include docs/examples/python/develop_tutorial.py:insert}} -``` - - - -```java -{{#include docs/examples/java/src/main/java/com/taos/example/CloudTutorial.java:insert}} -``` - - - - -```go -{{#include docs/examples/go/tutorial/main.go:insert}} -``` - - - - -In this example, we use `exec` method to execute SQL. `exec` is designed for some non-query SQL statements, all returned data would be ignored. 
- -```rust -{{#include docs/examples/rust/cloud-example/examples/tutorial.rs:insert}} -``` - - - - -```javascript -{{#include docs/examples/node/insert.js}} -``` - - - - - -``` XML -{{#include docs/examples/csharp/cloud-example/inout/inout.csproj}} -``` - -```csharp -{{#include docs/examples/csharp/cloud-example/inout/Program.cs:insert}} -``` - - - - - -:::note -`Use` statement is not applicable for cloud service since REST API is stateless. -::: \ No newline at end of file diff --git a/docs/en/07-data-in/05-rest.md b/docs/en/07-data-in/05-rest.md deleted file mode 100644 index 83041a1967..0000000000 --- a/docs/en/07-data-in/05-rest.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -sidebar_label: REST -title: REST -description: Insert data using REST API ---- - - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - - - -In this section we will explain how to write into TDengine cloud service using REST API. - -## Config - -Run this command in your terminal to save the TDengine cloud token and URL as variables: - - - - -```bash -export TDENGINE_CLOUD_TOKEN="" -export TDENGINE_CLOUD_URL="" -``` - - - - -```bash -set TDENGINE_CLOUD_TOKEN="" -set TDENGINE_CLOUD_URL="" -``` - - - - -```powershell -$env:TDENGINE_CLOUD_TOKEN="" -$env:TDENGINE_CLOUD_URL="" -``` - - - - -## Insert - -Following command below show how to insert data into the table `d1001` of the database `test` via the command line utility `curl`. - -```bash -curl -L \ - -d "INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31)" \ - $TDENGINE_CLOUD_URL/rest/sql/test?token=$TDENGINE_CLOUD_TOKEN -``` - -Please refer to [REST-API](https://docs.tdengine.com/reference/rest-api/) for detailed documentation. 
diff --git a/docs/en/09-data-out/01-query-data.md b/docs/en/09-data-out/01-query-data.md deleted file mode 100644 index 3810b705aa..0000000000 --- a/docs/en/09-data-out/01-query-data.md +++ /dev/null @@ -1,244 +0,0 @@ ---- -sidebar_label: SQL -title: Query Data Using SQL -description: Read data from TDengine using basic SQL. ---- - -# Query Data - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -## Query Using SQL - -SQL is used by TDengine as its query language. Application programs can send SQL statements to TDengine through REST API or connectors. TDengine's CLI `taos` can also be used to execute ad hoc SQL queries. Here is the list of major query functionalities supported by TDengine: - -- Query on single column or multiple columns -- Filter on tags or data columns:>, <, =, <\>, like -- Grouping of results: `Group By` -- Sorting of results: `Order By` -- Limit the number of results: `Limit/Offset` -- Arithmetic on columns of numeric types or aggregate results -- Join query with timestamp alignment -- Aggregate functions: count, max, min, avg, sum, twa, stddev, leastsquares, top, bottom, first, last, percentile, apercentile, last_row, spread, diff - -For example, the SQL statement below can be executed in TDengine CLI `taos` to select records with voltage greater than 215 and limit the output to only 2 rows. - -```sql -select * from d1001 where voltage > 215 order by ts desc limit 2; -``` - -```title=Output -taos> select * from d1001 where voltage > 215 order by ts desc limit 2; - ts | current | voltage | phase | -====================================================================================== - 2018-10-03 14:38:16.800 | 12.30000 | 221 | 0.31000 | - 2018-10-03 14:38:15.000 | 12.60000 | 218 | 0.33000 | -Query OK, 2 row(s) in set (0.001100s) -``` - -To meet the requirements of varied use cases, some special functions have been added in TDengine. 
Some examples are `twa` (Time Weighted Average), `spread` (The difference between the maximum and the minimum), and `last_row` (the last row). Furthermore, continuous query is also supported in TDengine. - -For detailed query syntax please refer to [Select](https://docs.tdengine.com/cloud/taos-sql/select). - -## Aggregation among Tables - -In most use cases, there are always multiple kinds of data collection points. A new concept, called STable (abbreviation for super table), is used in TDengine to represent one type of data collection point, and a subtable is used to represent a specific data collection point of that type. Tags are used by TDengine to represent the static properties of data collection points. A specific data collection point has its own values for static properties. By specifying filter conditions on tags, aggregation can be performed efficiently among all the subtables created via the same STable, i.e. same type of data collection points. Aggregate functions applicable for tables can be used directly on STables; the syntax is exactly the same. - -In summary, records across subtables can be aggregated by a simple query on their STable. It is like a join operation. However, tables belonging to different STables can not be aggregated. - -### Example 1 - -In TDengine CLI `taos`, use the SQL below to get the average voltage of all the meters in California grouped by location. - -``` -taos> SELECT AVG(voltage) FROM meters GROUP BY location; - avg(voltage) | location | -============================================================= - 222.000000000 | California.LosAngeles | - 219.200000000 | California.SanFrancisco | -Query OK, 2 row(s) in set (0.002136s) -``` - -### Example 2 - -In TDengine CLI `taos`, use the SQL below to get the number of rows and the maximum current in the past 24 hours from meters whose groupId is 2. 
- -``` -taos> SELECT count(*), max(current) FROM meters where groupId = 2 and ts > now - 24h; - count(*) | max(current) | -================================== - 5 | 13.4 | -Query OK, 1 row(s) in set (0.002136s) -``` - -Join queries are only allowed between subtables of the same STable. In [Select](https://docs.tdengine.com/cloud/taos-sql/select), all query operations are marked as to whether they support STables or not. - -## Down Sampling and Interpolation - -In IoT use cases, down sampling is widely used to aggregate data by time range. The `INTERVAL` keyword in TDengine can be used to simplify the query by time window. For example, the SQL statement below can be used to get the sum of current every 10 seconds from meters table d1001. - -``` -taos> SELECT sum(current) FROM d1001 INTERVAL(10s); - ts | sum(current) | -====================================================== - 2018-10-03 14:38:00.000 | 10.300000191 | - 2018-10-03 14:38:10.000 | 24.900000572 | -Query OK, 2 row(s) in set (0.000883s) -``` - -Down sampling can also be used for STable. For example, the below SQL statement can be used to get the sum of current from all meters in California. - -``` -taos> SELECT SUM(current) FROM meters where location like "California%" INTERVAL(1s); - ts | sum(current) | -====================================================== - 2018-10-03 14:38:04.000 | 10.199999809 | - 2018-10-03 14:38:05.000 | 32.900000572 | - 2018-10-03 14:38:06.000 | 11.500000000 | - 2018-10-03 14:38:15.000 | 12.600000381 | - 2018-10-03 14:38:16.000 | 36.000000000 | -Query OK, 5 row(s) in set (0.001538s) -``` - -Down sampling also supports time offset. For example, the below SQL statement can be used to get the sum of current from all meters but each time window must start at the boundary of 500 milliseconds. 
- -``` -taos> SELECT SUM(current) FROM meters INTERVAL(1s, 500a); - ts | sum(current) | -====================================================== - 2018-10-03 14:38:04.500 | 11.189999809 | - 2018-10-03 14:38:05.500 | 31.900000572 | - 2018-10-03 14:38:06.500 | 11.600000000 | - 2018-10-03 14:38:15.500 | 12.300000381 | - 2018-10-03 14:38:16.500 | 35.000000000 | -Query OK, 5 row(s) in set (0.001521s) -``` - -In many use cases, it's hard to align the timestamp of the data collected by each collection point. However, a lot of algorithms like FFT require the data to be aligned with same time interval and application programs have to handle this by themselves. In TDengine, it's easy to achieve the alignment using down sampling. - -Interpolation can be performed in TDengine if there is no data in a time range. - -For more details please refer to [Aggregate by Window](https://docs.tdengine.com/cloud/taos-sql/interval). - -## Connector Examples - -:::note -Before executing the sample code in this section, you need to firstly establish connection to TDegnine cloud service, please refer to [Connect to TDengine Cloud Service](../../programming/connect/). - -::: - - - - -In this example, we use `query` method to execute SQL and get a `result` object. - -```python -{{#include docs/examples/python/develop_tutorial.py:query:nrc}} -``` - -Get column metadata(column name, column type and column length) from `result`: - -```python -{{#include docs/examples/python/develop_tutorial.py:fields:nrc}} -``` - -Get total rows from `result`: - -```python -{{#include docs/examples/python/develop_tutorial.py:rows:nrc}} -``` - -Iterate over each rows: - -```python -{{#include docs/examples/python/develop_tutorial.py:iter}} -``` - - - - -In this example we use `executeQuery` method of `Statement` object and get a `ResultSet` object. 
- -```java -{{#include docs/examples/java/src/main/java/com/taos/example/CloudTutorial.java:query:nrc}} -``` - -Get column meta from the result: - -```java -{{#include docs/examples/java/src/main/java/com/taos/example/CloudTutorial.java:meta:nrc}} -``` - -Iterate over the result and print each row: - -```java -{{#include docs/examples/java/src/main/java/com/taos/example/CloudTutorial.java:iter}} -``` - - - - -In this example we use `Query` method to execute SQL and get a `sql.Rows` object. - -```go -{{#include docs/examples/go/tutorial/main.go:query:nrc}} -``` - -Get column names from rows: - -```go -{{#include docs/examples/go/tutorial/main.go:meta:nrc}} -``` - -Iterate over rows and print each row: - -```go -{{#include docs/examples/go/tutorial/main.go:iter}} -``` - - - - -In this example, we use query method to execute SQL and get a result object. - -```rust -{{#include docs/examples/rust/cloud-example/examples/tutorial.rs:query:nrc}} -``` - -Get column meta from the result: - -```rust -{{#include docs/examples/rust/cloud-example/examples/tutorial.rs:meta:nrc}} -``` - -Get all rows and print each row: - -```rust -{{#include docs/examples/rust/cloud-example/examples/tutorial.rs:iter}} -``` - - - - -```javascript -{{#include docs/examples/node/query.js}} -``` - - - - - -In this example, we use query method to execute SQL and get a result object. 
- -``` XML -{{#include docs/examples/csharp/cloud-example/inout/inout.csproj}} -``` - -```C# -{{#include docs/examples/csharp/cloud-example/inout/Program.cs:query}} -``` - - - - diff --git a/docs/en/09-data-out/02-rest.md b/docs/en/09-data-out/02-rest.md deleted file mode 100644 index e3705e1ac4..0000000000 --- a/docs/en/09-data-out/02-rest.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -sidebar_label: REST -title: REST -description: Insert data using REST API ---- - - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - - - -In this section we will explain how to query data from TDengine cloud service using REST API. - -## Config - -Run this command in your terminal to save the TDengine cloud token and URL as variables: - - - - -```bash -export TDENGINE_CLOUD_TOKEN="" -export TDENGINE_CLOUD_URL="" -``` - - - - -```bash -set TDENGINE_CLOUD_TOKEN="" -set TDENGINE_CLOUD_URL="" -``` - - - - -```powershell -$env:TDENGINE_CLOUD_TOKEN="" -$env:TDENGINE_CLOUD_URL="" -``` - - - - -## Query - -Following command below show how to query data into from table `ins_databases` of the database `information_schema` via the command line utility `curl`. - -```bash -curl -L \ - -d "select name, ntables, status from information_schema.ins_databases;" \ - $TDENGINE_CLOUD_URL/rest/sql/test?token=$TDENGINE_CLOUD_TOKEN -``` - -Please refer to [REST-API](https://docs.tdengine.com/reference/rest-api/) for detailed documentation. 
diff --git a/docs/en/15-programming/01-connect/09-rest-api.md b/docs/en/15-programming/01-connect/09-rest-api.md index 67ecc30b9d..a259d33e89 100644 --- a/docs/en/15-programming/01-connect/09-rest-api.md +++ b/docs/en/15-programming/01-connect/09-rest-api.md @@ -1,7 +1,7 @@ --- -sidebar_label: REST and Schemaless -title: REST and Schemaless -description: Connect to TDengine Cloud Service through RESTful API or Schemaless +sidebar_label: REST API +title: REST API +description: Connect to TDengine Cloud Service through RESTful API --- @@ -49,30 +49,3 @@ curl -L \ -d "select name, ntables, status from information_schema.ins_databases;" \ $TDENGINE_CLOUD_URL/rest/sql?token=$TDENGINE_CLOUD_TOKEN ``` - -## Schemaless - -### InfluxDB Line Protocol - -You can use any client that supports the http protocol to access the RESTful interface address `${TDENGINE_CLOUD_URL}/influxdb/v1/write` to write data in InfluxDB compatible format to TDengine. The EndPoint is as follows: - -```text -/influxdb/v1/write?db=&token=${TDENGINE_CLOUD_TOKEN} -``` - -Support InfluxDB query parameters as follows. - -- `db` Specifies the database name used by TDengine -- `precision` The time precision used by TDengine - -Note: InfluxDB token authorization is not supported at present. Only Basic authorization and query parameter validation are supported. - -### OpenTSDB Json and Telnet Protocol - -You can use any client that supports the http protocol to access the RESTful interface address `${TDENGINE_CLOUD_URL}/opentsdb/v1/put` to write data in OpenTSDB compatible format to TDengine. 
The EndPoint is as follows: - -```text -/opentsdb/v1/put/json/?token=${TDENGINE_CLOUD_TOKEN} -/opentsdb/v1/put/telnet/?token=${TDENGINE_CLOUD_TOKEN} -``` - diff --git a/docs/en/15-programming/03-insert.md b/docs/en/15-programming/03-insert.md index 4cb731e681..068bfb44ec 100644 --- a/docs/en/15-programming/03-insert.md +++ b/docs/en/15-programming/03-insert.md @@ -1,7 +1,105 @@ --- sidebar_label: Insert -title: Insert Data Into TDengine -description: Programming Guide for Inserting Data into TDengine +title: Insert Data Using SQL +description: Programming Guide for Inserting data using TDengine SQL --- -To quickly start your programming about writing data into TDengine, please refer to [Insert Data](../../data-in/insert-data). \ No newline at end of file + + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +## SQL Examples + +Here are some brief examples for `INSERT` statement. You can execute these statements manually by TDengine CLI or TDengine Cloud Explorer or programmatically by TDengine connectors. + +### Insert Single Row + +The below SQL statement is used to insert one row into table "d1001". + +```sql +INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31); +``` + +### Insert Multiple Rows + +Multiple rows can be inserted in a single SQL statement. The example below inserts 2 rows into table "d1001". + +```sql +INSERT INTO d1001 VALUES (1538548684000, 10.2, 220, 0.23) (1538548696650, 10.3, 218, 0.25); +``` + +### Insert into Multiple Tables + +Data can be inserted into multiple tables in the same SQL statement. The example below inserts 2 rows into table "d1001" and 1 row into table "d1002". + +```sql +INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, 218, 0.33) d1002 VALUES (1538548696800, 12.3, 221, 0.31); +``` + +For more details about `INSERT` please refer to [INSERT](https://docs.tdengine.com/cloud/taos-sql/insert).
+ + +## Connector Examples + +:::note +Before executing the sample code in this section, you need to firstly establish connection to TDegnine cloud service, please refer to [Connect to TDengine Cloud Service](../../programming/connect/). + +::: + + + + +In this example, we use `execute` method to execute SQL and get affected rows. The variable `conn` is an instance of class `taosrest.TaosRestConnection` we just created at [Connect Tutorial](../../programming/connect/python#connect). + +```python +{{#include docs/examples/python/develop_tutorial.py:insert}} +``` + + + +```java +{{#include docs/examples/java/src/main/java/com/taos/example/CloudTutorial.java:insert}} +``` + + + + +```go +{{#include docs/examples/go/tutorial/main.go:insert}} +``` + + + + +In this example, we use `exec` method to execute SQL. `exec` is designed for some non-query SQL statements, all returned data would be ignored. + +```rust +{{#include docs/examples/rust/cloud-example/examples/tutorial.rs:insert}} +``` + + + + +```javascript +{{#include docs/examples/node/insert.js}} +``` + + + + + +``` XML +{{#include docs/examples/csharp/cloud-example/inout/inout.csproj}} +``` + +```csharp +{{#include docs/examples/csharp/cloud-example/inout/Program.cs:insert}} +``` + + + + + +:::note +`Use` statement is not applicable for cloud service since REST API is stateless. +::: \ No newline at end of file diff --git a/docs/en/15-programming/04-query.md b/docs/en/15-programming/04-query.md index 91ddae8fbb..02940ba5c9 100644 --- a/docs/en/15-programming/04-query.md +++ b/docs/en/15-programming/04-query.md @@ -1,7 +1,242 @@ --- sidebar_label: Query -title: Query Data From TDengine -description: Programming Guide for Querying Data +title: Query Data Using SQL +description: Programming Guide for Querying Data using basic SQL. --- -To quickly start your programming about querying data from TDengine, please refer to [Query Data](../../data-out/query-data). 
\ No newline at end of file +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +## Query Using SQL + +SQL is used by TDengine as its query language. Application programs can send SQL statements to TDengine through REST API or connectors. TDengine's CLI `taos` can also be used to execute ad hoc SQL queries. Here is the list of major query functionalities supported by TDengine: + +- Query on single column or multiple columns +- Filter on tags or data columns:>, <, =, <\>, like +- Grouping of results: `Group By` +- Sorting of results: `Order By` +- Limit the number of results: `Limit/Offset` +- Arithmetic on columns of numeric types or aggregate results +- Join query with timestamp alignment +- Aggregate functions: count, max, min, avg, sum, twa, stddev, leastsquares, top, bottom, first, last, percentile, apercentile, last_row, spread, diff + +For example, the SQL statement below can be executed in TDengine CLI `taos` to select records with voltage greater than 215 and limit the output to only 2 rows. + +```sql +select * from d1001 where voltage > 215 order by ts desc limit 2; +``` + +```title=Output +taos> select * from d1001 where voltage > 215 order by ts desc limit 2; + ts | current | voltage | phase | +====================================================================================== + 2018-10-03 14:38:16.800 | 12.30000 | 221 | 0.31000 | + 2018-10-03 14:38:15.000 | 12.60000 | 218 | 0.33000 | +Query OK, 2 row(s) in set (0.001100s) +``` + +To meet the requirements of varied use cases, some special functions have been added in TDengine. Some examples are `twa` (Time Weighted Average), `spread` (The difference between the maximum and the minimum), and `last_row` (the last row). Furthermore, continuous query is also supported in TDengine. + +For detailed query syntax please refer to [Select](https://docs.tdengine.com/cloud/taos-sql/select). 
+ +## Aggregation among Tables + +In most use cases, there are always multiple kinds of data collection points. A new concept, called STable (abbreviation for super table), is used in TDengine to represent one type of data collection point, and a subtable is used to represent a specific data collection point of that type. Tags are used by TDengine to represent the static properties of data collection points. A specific data collection point has its own values for static properties. By specifying filter conditions on tags, aggregation can be performed efficiently among all the subtables created via the same STable, i.e. same type of data collection points. Aggregate functions applicable for tables can be used directly on STables; the syntax is exactly the same. + +In summary, records across subtables can be aggregated by a simple query on their STable. It is like a join operation. However, tables belonging to different STables can not be aggregated. + +### Example 1 + +In TDengine CLI `taos`, use the SQL below to get the average voltage of all the meters in California grouped by location. + +``` +taos> SELECT AVG(voltage) FROM meters GROUP BY location; + avg(voltage) | location | +============================================================= + 222.000000000 | California.LosAngeles | + 219.200000000 | California.SanFrancisco | +Query OK, 2 row(s) in set (0.002136s) +``` + +### Example 2 + +In TDengine CLI `taos`, use the SQL below to get the number of rows and the maximum current in the past 24 hours from meters whose groupId is 2. + +``` +taos> SELECT count(*), max(current) FROM meters where groupId = 2 and ts > now - 24h; + count(*) | max(current) | +================================== + 5 | 13.4 | +Query OK, 1 row(s) in set (0.002136s) +``` + +Join queries are only allowed between subtables of the same STable. In [Select](https://docs.tdengine.com/cloud/taos-sql/select), all query operations are marked as to whether they support STables or not. 
+ +## Down Sampling and Interpolation + +In IoT use cases, down sampling is widely used to aggregate data by time range. The `INTERVAL` keyword in TDengine can be used to simplify the query by time window. For example, the SQL statement below can be used to get the sum of current every 10 seconds from meters table d1001. + +``` +taos> SELECT sum(current) FROM d1001 INTERVAL(10s); + ts | sum(current) | +====================================================== + 2018-10-03 14:38:00.000 | 10.300000191 | + 2018-10-03 14:38:10.000 | 24.900000572 | +Query OK, 2 row(s) in set (0.000883s) +``` + +Down sampling can also be used for STable. For example, the below SQL statement can be used to get the sum of current from all meters in California. + +``` +taos> SELECT SUM(current) FROM meters where location like "California%" INTERVAL(1s); + ts | sum(current) | +====================================================== + 2018-10-03 14:38:04.000 | 10.199999809 | + 2018-10-03 14:38:05.000 | 32.900000572 | + 2018-10-03 14:38:06.000 | 11.500000000 | + 2018-10-03 14:38:15.000 | 12.600000381 | + 2018-10-03 14:38:16.000 | 36.000000000 | +Query OK, 5 row(s) in set (0.001538s) +``` + +Down sampling also supports time offset. For example, the below SQL statement can be used to get the sum of current from all meters but each time window must start at the boundary of 500 milliseconds. + +``` +taos> SELECT SUM(current) FROM meters INTERVAL(1s, 500a); + ts | sum(current) | +====================================================== + 2018-10-03 14:38:04.500 | 11.189999809 | + 2018-10-03 14:38:05.500 | 31.900000572 | + 2018-10-03 14:38:06.500 | 11.600000000 | + 2018-10-03 14:38:15.500 | 12.300000381 | + 2018-10-03 14:38:16.500 | 35.000000000 | +Query OK, 5 row(s) in set (0.001521s) +``` + +In many use cases, it's hard to align the timestamp of the data collected by each collection point. 
However, a lot of algorithms like FFT require the data to be aligned with same time interval and application programs have to handle this by themselves. In TDengine, it's easy to achieve the alignment using down sampling. + +Interpolation can be performed in TDengine if there is no data in a time range. + +For more details please refer to [Aggregate by Window](https://docs.tdengine.com/cloud/taos-sql/interval). + +## Connector Examples + +:::note +Before executing the sample code in this section, you need to firstly establish connection to TDegnine cloud service, please refer to [Connect to TDengine Cloud Service](../../programming/connect/). + +::: + + + + +In this example, we use `query` method to execute SQL and get a `result` object. + +```python +{{#include docs/examples/python/develop_tutorial.py:query:nrc}} +``` + +Get column metadata(column name, column type and column length) from `result`: + +```python +{{#include docs/examples/python/develop_tutorial.py:fields:nrc}} +``` + +Get total rows from `result`: + +```python +{{#include docs/examples/python/develop_tutorial.py:rows:nrc}} +``` + +Iterate over each rows: + +```python +{{#include docs/examples/python/develop_tutorial.py:iter}} +``` + + + + +In this example we use `executeQuery` method of `Statement` object and get a `ResultSet` object. + +```java +{{#include docs/examples/java/src/main/java/com/taos/example/CloudTutorial.java:query:nrc}} +``` + +Get column meta from the result: + +```java +{{#include docs/examples/java/src/main/java/com/taos/example/CloudTutorial.java:meta:nrc}} +``` + +Iterate over the result and print each row: + +```java +{{#include docs/examples/java/src/main/java/com/taos/example/CloudTutorial.java:iter}} +``` + + + + +In this example we use `Query` method to execute SQL and get a `sql.Rows` object. 
+ +```go +{{#include docs/examples/go/tutorial/main.go:query:nrc}} +``` + +Get column names from rows: + +```go +{{#include docs/examples/go/tutorial/main.go:meta:nrc}} +``` + +Iterate over rows and print each row: + +```go +{{#include docs/examples/go/tutorial/main.go:iter}} +``` + + + + +In this example, we use query method to execute SQL and get a result object. + +```rust +{{#include docs/examples/rust/cloud-example/examples/tutorial.rs:query:nrc}} +``` + +Get column meta from the result: + +```rust +{{#include docs/examples/rust/cloud-example/examples/tutorial.rs:meta:nrc}} +``` + +Get all rows and print each row: + +```rust +{{#include docs/examples/rust/cloud-example/examples/tutorial.rs:iter}} +``` + + + + +```javascript +{{#include docs/examples/node/query.js}} +``` + + + + + +In this example, we use query method to execute SQL and get a result object. + +``` XML +{{#include docs/examples/csharp/cloud-example/inout/inout.csproj}} +``` + +```C# +{{#include docs/examples/csharp/cloud-example/inout/Program.cs:query}} +``` + + + + diff --git a/docs/en/15-programming/06-connector/index.md b/docs/en/15-programming/06-connector/index.md index f37bf329a8..9d17eaebe3 100644 --- a/docs/en/15-programming/06-connector/index.md +++ b/docs/en/15-programming/06-connector/index.md @@ -1,5 +1,5 @@ --- -sidebar_label: Connector +sidebar_label: Connector Reference title: Connector Reference description: 'Reference guide for connectors' --- diff --git a/docs/en/17-taos-sql/01-data-type.md b/docs/en/17-taos-sql/01-data-type.md deleted file mode 100644 index b830994ac9..0000000000 --- a/docs/en/17-taos-sql/01-data-type.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -sidebar_label: Data Types -title: Data Types -description: "TDengine supports a variety of data types including timestamp, float, JSON and many others." ---- - -## Timestamp - -When using TDengine to store and query data, the most important part of the data is timestamp. 
Timestamp must be specified when creating and inserting data rows. Timestamp must follow the rules below: - -- The format must be `YYYY-MM-DD HH:mm:ss.MS`, the default time precision is millisecond (ms), for example `2017-08-12 18:25:58.128` -- Internal function `now` can be used to get the current timestamp on the client side -- The current timestamp of the client side is applied when `now` is used to insert data -- Epoch Time:timestamp can also be a long integer number, which means the number of seconds, milliseconds or nanoseconds, depending on the time precision, from 1970-01-01 00:00:00.000 (UTC/GMT) -- Add/subtract operations can be carried out on timestamps. For example `now-2h` means 2 hours prior to the time at which query is executed. The units of time in operations can be b(nanosecond), u(microsecond), a(millisecond), s(second), m(minute), h(hour), d(day), or w(week). So `select * from t1 where ts > now-2w and ts <= now-1w` means the data between two weeks ago and one week ago. The time unit can also be n (calendar month) or y (calendar year) when specifying the time window for down sampling operations. - -Time precision in TDengine can be set by the `PRECISION` parameter when executing `CREATE DATABASE`. The default time precision is millisecond. In the statement below, the precision is set to nanonseconds. - -```sql -CREATE DATABASE db_name PRECISION 'ns'; -``` -## Data Types - -In TDengine, the data types below can be used when specifying a column or tag. 
- -| # | **type** | **Bytes** | **Description** | -| --- | :-------: | --------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| 1 | TIMESTAMP | 8 | Default precision is millisecond, microsecond and nanosecond are also supported | -| 2 | INT | 4 | Integer, the value range is [-2^31, 2^31-1] | -| 3 | INT UNSIGNED| 4| unsigned integer, the value range is [0, 2^32-1] -| 4 | BIGINT | 8 | Long integer, the value range is [-2^63, 2^63-1] | -| 5 | BIGINT UNSIGNED | 8 | unsigned long integer, the value range is [0, 2^64-1] | -| 6 | FLOAT | 4 | Floating point number, the effective number of digits is 6-7, the value range is [-3.4E38, 3.4E38] | -| 7 | DOUBLE | 8 | Double precision floating point number, the effective number of digits is 15-16, the value range is [-1.7E308, 1.7E308] | -| 8 | BINARY | User Defined | Single-byte string for ASCII visible characters. Length must be specified when defining a column or tag of binary type. | -| 9 | SMALLINT | 2 | Short integer, the value range is [-32768, 32767] | -| 10 | INT UNSIGNED| 2| unsigned integer, the value range is [0, 65535]| -| 11 | TINYINT | 1 | Single-byte integer, the value range is [-128, 127] | -| 12 | TINYINT UNSIGNED | 1 | unsigned single-byte integer, the value range is [0, 255] | -| 13 | BOOL | 1 | Bool, the value range is {true, false} | -| 14 | NCHAR | User Defined| Multi-Byte string that can include multi byte characters like Chinese characters. Each character of NCHAR type consumes 4 bytes storage. The string value should be quoted with single quotes. Literal single quote inside the string must be preceded with backslash, like `\’`. 
The length must be specified when defining a column or tag of NCHAR type, for example nchar(10) means it can store at most 10 characters of nchar type and will consume fixed storage of 40 bytes. An error will be reported if the string value exceeds the length defined. | -| 15 | JSON | | JSON type can only be used on tags. A tag of json type is excluded with any other tags of any other type | -| 16 | VARCHAR | User-defined | Alias of BINARY | - - -:::note -- TDengine is case insensitive and treats any characters in the sql command as lower case by default, case sensitive strings must be quoted with single quotes. -- Only ASCII visible characters are suggested to be used in a column or tag of BINARY type. Multi-byte characters must be stored in NCHAR type. -- The length of BINARY can be up to 16374 bytes. The string value must be quoted with single quotes. You must specify a length in bytes for a BINARY value, for example binary(20) for up to twenty single-byte characters. If the data exceeds the specified length, an error will occur. The literal single quote inside the string must be preceded with back slash like `\'` -- Numeric values in SQL statements will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used, so attention must be paid to avoid overflow. For example, 9999999999999999999 will be considered as overflow because it exceeds the upper limit of long integer, but 9999999999999999999.0 will be considered as a legal float number. - -::: - - -## Constants -TDengine supports a variety of constants: - -| # | **Syntax** | **Type** | **Description** | -| --- | :-------: | --------- | -------------------------------------- | -| 1 | [{+ \| -}]123 | BIGINT | Integer literals are of type BIGINT. Data that exceeds the length of the BIGINT type is truncated. | -| 2 | 123.45 | DOUBLE | Floating-point literals are of type DOUBLE. 
Numeric values will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used. | -| 3 | 1.2E3 | DOUBLE | Literals in scientific notation are of type DOUBLE. | -| 4 | 'abc' | BINARY | Content enclosed in single quotation marks is of type BINARY. The size of a BINARY is the size of the string in bytes. A literal single quote inside the string must be escaped with a backslash (\'). | -| 5 | 'abc' | BINARY | Content enclosed in double quotation marks is of type BINARY. The size of a BINARY is the size of the string in bytes. A literal double quote inside the string must be escaped with a backslash (\"). | -| 6 | TIMESTAMP {'literal' \| "literal"} | TIMESTAMP | The TIMESTAMP keyword indicates that the following string literal is interpreted as a timestamp. The string must be in YYYY-MM-DD HH:mm:ss.MS format. The precision is inherited from the database configuration. | -| 7 | {TRUE \| FALSE} | BOOL | Boolean literals are of type BOOL. | -| 8 | {'' \| "" \| '\t' \| "\t" \| ' ' \| " " \| NULL } | -- | The preceding characters indicate null literals. These can be used with any data type. | - -:::note -Numeric values will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used, so attention must be paid to avoid overflow. For example, 9999999999999999999 will be considered as overflow because it exceeds the upper limit of long integer, but 9999999999999999999.0 will be considered as a legal float number. 
- -::: diff --git a/docs/en/17-taos-sql/02-database.md b/docs/en/17-taos-sql/02-database.md deleted file mode 100644 index d9dadae976..0000000000 --- a/docs/en/17-taos-sql/02-database.md +++ /dev/null @@ -1,156 +0,0 @@ ---- -sidebar_label: Database -title: Database -description: "create and drop database, show or change database parameters" ---- - -## Create a Database - -```sql -CREATE DATABASE [IF NOT EXISTS] db_name [database_options] - -database_options: - database_option ... - -database_option: { - BUFFER value - | CACHEMODEL {'none' | 'last_row' | 'last_value' | 'both'} - | CACHESIZE value - | COMP {0 | 1 | 2} - | DURATION value - | WAL_FSYNC_PERIOD value - | MAXROWS value - | MINROWS value - | KEEP value - | PAGES value - | PAGESIZE value - | PRECISION {'ms' | 'us' | 'ns'} - | REPLICA value - | RETENTIONS ingestion_duration:keep_duration ... - | STRICT {'off' | 'on'} - | WAL_LEVEL {1 | 2} - | VGROUPS value - | SINGLE_STABLE {0 | 1} - | WAL_RETENTION_PERIOD value - | WAL_ROLL_PERIOD value - | WAL_RETENTION_SIZE value - | WAL_SEGMENT_SIZE value -} -``` - -## Parameters - -- BUFFER: specifies the size (in MB) of the write buffer for each vnode. Enter a value between 3 and 16384. The default value is 96. -- CACHEMODEL: specifies how the latest data in subtables is stored in the cache. The default value is none. - - none: The latest data is not cached. - - last_row: The last row of each subtable is cached. This option significantly improves the performance of the LAST_ROW function. - - last_value: The last non-null value of each column in each subtable is cached. This option significantly improves the performance of the LAST function under normal circumstances, such as statements including the WHERE, ORDER BY, GROUP BY, and INTERVAL keywords. - - both: The last row of each subtable and the last non-null value of each column in each subtable are cached. -- CACHESIZE: specifies the amount (in MB) of memory used for subtable caching on each vnode. 
Enter a value between 1 and 65536. The default value is 1. -- COMP: specifies how databases are compressed. The default value is 2. - - 0: Compression is disabled. - - 1: One-pass compression is enabled. - - 2: Two-pass compression is enabled. -- DURATION: specifies the time period contained in each data file. After the time specified by this parameter has elapsed, TDengine creates a new data file to store incoming data. You can use m (minutes), h (hours), and d (days) as the unit, for example DURATION 100h or DURATION 10d. If you do not include a unit, d is used by default. -- WAL_FSYNC_PERIOD: specifies the interval (in milliseconds) at which data is written from the WAL to disk. This parameter takes effect only when the WAL parameter is set to 2. The default value is 3000. Enter a value between 0 and 180000. The value 0 indicates that incoming data is immediately written to disk. -- MAXROWS: specifies the maximum number of rows recorded in a block. The default value is 4096. -- MINROWS: specifies the minimum number of rows recorded in a block. The default value is 100. -- KEEP: specifies the time for which data is retained. Enter a value between 1 and 365000. The default value is 3650. The value of the KEEP parameter must be greater than or equal to the value of the DURATION parameter. TDengine automatically deletes data that is older than the value of the KEEP parameter. You can use m (minutes), h (hours), and d (days) as the unit, for example KEEP 100h or KEEP 10d. If you do not include a unit, d is used by default. -- PAGES: specifies the number of pages in the metadata storage engine cache on each vnode. Enter a value greater than or equal to 64. The default value is 256. The space occupied by metadata storage on each vnode is equal to the product of the values of the PAGESIZE and PAGES parameters. The space occupied by default is 1 MB. -- PAGESIZE: specifies the size (in KB) of each page in the metadata storage engine cache on each vnode. 
The default value is 4. Enter a value between 1 and 16384. -- PRECISION: specifies the precision at which a database records timestamps. Enter ms for milliseconds, us for microseconds, or ns for nanoseconds. The default value is ms. -- REPLICA: specifies the number of replicas that are made of the database. Enter 1 or 3. The default value is 1. The value of the REPLICA parameter cannot exceed the number of dnodes in the cluster. -- RETENTIONS: specifies the retention period for data aggregated at various intervals. For example, RETENTIONS 15s:7d,1m:21d,15m:50d indicates that data aggregated every 15 seconds is retained for 7 days, data aggregated every 1 minute is retained for 21 days, and data aggregated every 15 minutes is retained for 50 days. You must enter three aggregation intervals and corresponding retention periods. -- STRICT: specifies whether strong data consistency is enabled. The default value is off. - - on: Strong consistency is enabled and implemented through the Raft consensus algorithm. In this mode, an operation is considered successful once it is confirmed by half of the nodes in the cluster. - - off: Strong consistency is disabled. In this mode, an operation is considered successful when it is initiated by the local node. -- WAL_LEVEL: specifies whether fsync is enabled. The default value is 1. - - 1: WAL is enabled but fsync is disabled. - - 2: WAL and fsync are both enabled. -- VGROUPS: specifies the initial number of vgroups when a database is created. -- SINGLE_STABLE: specifies whether the database can contain more than one supertable. - - 0: The database can contain multiple supertables. - - 1: The database can contain only one supertable. -- WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. This parameter is used for data subscription. Enter a time in seconds. The default value is 0. A value of 0 indicates that each WAL file is deleted immediately after its contents are written to disk. 
-1: WAL files are never deleted. -- WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription. Enter a size in KB. The default value is 0. A value of 0 indicates that each WAL file is deleted immediately after its contents are written to disk. -1: WAL files are never deleted. -- WAL_ROLL_PERIOD: specifies the time after which WAL files are rotated. After this period elapses, a new WAL file is created. The default value is 0. A value of 0 indicates that a new WAL file is created only after the previous WAL file was written to disk. -- WAL_SEGMENT_SIZE: specifies the maximum size of a WAL file. After the current WAL file reaches this size, a new WAL file is created. The default value is 0. A value of 0 indicates that a new WAL file is created only after the previous WAL file was written to disk. - -### Example Statement - -```sql -create database if not exists db vgroups 10 buffer 10 - -``` - -The preceding SQL statement creates a database named db that has 10 vgroups and whose vnodes have a 10 MB cache. - -### Specify the Database in Use - -``` -USE db_name; -``` - -The preceding SQL statement switches to the specified database. (If you connect to TDengine over the REST API, this statement does not take effect.) - -## Drop a Database - -``` -DROP DATABASE [IF EXISTS] db_name -``` - -The preceding SQL statement deletes the specified database. This statement will delete all tables in the database and destroy all vgroups associated with it. Exercise caution when using this statement. - -## Change Database Configuration - -```sql -ALTER DATABASE db_name [alter_database_options] - -alter_database_options: - alter_database_option ... - -alter_database_option: { - CACHEMODEL {'none' | 'last_row' | 'last_value' | 'both'} - | CACHESIZE value - | WAL_LEVEL value - | WAL_FSYNC_PERIOD value - | KEEP value -} -``` - -:::note -Other parameters cannot be modified after the database has been created. 
- -::: - -## View Databases - -### View All Databases - -``` -SHOW DATABASES; -``` - -### View the CREATE Statement for a Database - -``` -SHOW CREATE DATABASE db_name; -``` - -The preceding SQL statement can be used in migration scenarios. This command can be used to get the CREATE statement, which can be used in another TDengine instance to create the exact same database. - -### View Database Configuration - -```sql -SHOW DATABASES \G; -``` - -The preceding SQL statement shows the value of each parameter for the specified database. One value is displayed per line. - -## Delete Expired Data - -```sql -TRIM DATABASE db_name; -``` - -The preceding SQL statement deletes data that has expired and orders the remaining data in accordance with the storage configuration. diff --git a/docs/en/17-taos-sql/03-table.md b/docs/en/17-taos-sql/03-table.md deleted file mode 100644 index bf32cf171b..0000000000 --- a/docs/en/17-taos-sql/03-table.md +++ /dev/null @@ -1,198 +0,0 @@ ---- -title: Table ---- - -## Create Table - -You create standard tables and subtables with the `CREATE TABLE` statement. - -```sql -CREATE TABLE [IF NOT EXISTS] [db_name.]tb_name (create_definition [, create_definitionn] ...) [table_options] - -CREATE TABLE create_subtable_clause - -CREATE TABLE [IF NOT EXISTS] [db_name.]tb_name (create_definition [, create_definitionn] ...) - [TAGS (create_definition [, create_definitionn] ...)] - [table_options] - -create_subtable_clause: { - create_subtable_clause [create_subtable_clause] ... - | [IF NOT EXISTS] [db_name.]tb_name USING [db_name.]stb_name [(tag_name [, tag_name] ...)] TAGS (tag_value [, tag_value] ...) -} - -create_definition: - col_name column_definition - -column_definition: - type_name [comment 'string_value'] - -table_options: - table_option ... - -table_option: { - COMMENT 'string_value' - | WATERMARK duration[,duration] - | MAX_DELAY duration[,duration] - | ROLLUP(func_name [, func_name] ...) - | SMA(col_name [, col_name] ...) 
- | TTL value -} - -``` - -**More explanations** - -1. The first column of a table MUST be of type TIMESTAMP. It is automatically set as the primary key. -2. The maximum length of the table name is 192 bytes. -3. The maximum length of each row is 48k bytes, please note that the extra 2 bytes used by each BINARY/NCHAR column are also counted. -4. The name of the subtable can only consist of characters from the English alphabet, digits and underscore. Table names can't start with a digit. Table names are case insensitive. -5. The maximum length in bytes must be specified when using BINARY or NCHAR types. -6. Escape character "\`" can be used to avoid the conflict between table names and reserved keywords, above rules will be bypassed when using escape character on table names, but the upper limit for the name length is still valid. The table names specified using escape character are case sensitive. - For example \`aBc\` and \`abc\` are different table names but `abc` and `aBc` are same table names because they are both converted to `abc` internally. - Only ASCII visible characters can be used with escape character. - -**Parameter description** -1. COMMENT: specifies comments for the table. This parameter can be used with supertables, standard tables, and subtables. -2. WATERMARK: specifies the time after which the window is closed. The default value is 5 seconds. Enter a value between 0 and 15 minutes in milliseconds, seconds, or minutes. You can enter multiple values separated by commas (,). This parameter applies only to supertables and takes effect only when the RETENTIONS parameter has been specified for the database. -3. MAX_DELAY: specifies the maximum latency for pushing computation results. The default value is 15 minutes or the value of the INTERVAL parameter, whichever is smaller. Enter a value between 0 and 15 minutes in milliseconds, seconds, or minutes. You can enter multiple values separated by commas (,). Note: Retain the default value if possible. 
Configuring a small MAX_DELAY may cause results to be frequently pushed, affecting storage and query performance. This parameter applies only to supertables and takes effect only when the RETENTIONS parameter has been specified for the database. -4. ROLLUP: specifies aggregate functions to roll up. Rolling up a function provides downsampled results based on multiple axes. This parameter applies only to supertables and takes effect only when the RETENTIONS parameter has been specified for the database. You can specify only one function to roll up. The rollup takes effect on all columns except TS. Enter one of the following values: avg, sum, min, max, last, or first. -5. SMA: specifies functions on which to enable small materialized aggregates (SMA). SMA is user-defined precomputation of aggregates based on data blocks. Enter one of the following values: max, min, or sum This parameter can be used with supertables and standard tables. -6. TTL: specifies the time to live (TTL) for the table. If the period specified by the TTL parameter elapses without any data being written to the table, TDengine will automatically delete the table. Note: The system may not delete the table at the exact moment that the TTL expires. Enter a value in days. The default value is 0. Note: The TTL parameter has a higher priority than the KEEP parameter. If a table is marked for deletion because the TTL has expired, it will be deleted even if the time specified by the KEEP parameter has not elapsed. This parameter can be used with standard tables and subtables. - -## Create Subtables - -### Create a Subtable - -```sql -CREATE TABLE [IF NOT EXISTS] tb_name USING stb_name TAGS (tag_value1, ...); -``` - -### Create a Subtable with Specified Tags - -```sql -CREATE TABLE [IF NOT EXISTS] tb_name USING stb_name (tag_name1, ...) TAGS (tag_value1, ...); -``` - -The preceding SQL statement creates a subtable based on a supertable but specifies a subset of tags to use. 
Tags that are not included in this subset are assigned a null value. - -### Create Multiple Subtables - -```sql -CREATE TABLE [IF NOT EXISTS] tb_name1 USING stb_name TAGS (tag_value1, ...) [IF NOT EXISTS] tb_name2 USING stb_name TAGS (tag_value2, ...) ...; -``` - -You can create multiple subtables in a single SQL statement provided that all subtables use the same supertable. For performance reasons, do not create more than 3000 tables per statement. - -## Modify a Table - -```sql -ALTER TABLE [db_name.]tb_name alter_table_clause - -alter_table_clause: { - alter_table_options - | ADD COLUMN col_name column_type - | DROP COLUMN col_name - | MODIFY COLUMN col_name column_type - | RENAME COLUMN old_col_name new_col_name -} - -alter_table_options: - alter_table_option ... - -alter_table_option: { - TTL value - | COMMENT 'string_value' -} - -``` - -**More explanations** -You can perform the following modifications on existing tables: -1. ADD COLUMN: adds a column to the supertable. -2. DROP COLUMN: deletes a column from the supertable. -3. MODIFY COLUMN: changes the length of the data type specified for the column. Note that you can only specify a length greater than the current length. -4. RENAME COLUMN: renames a specified column in the table. - -### Add a Column - -```sql -ALTER TABLE tb_name ADD COLUMN field_name data_type; -``` - -### Delete a Column - -```sql -ALTER TABLE tb_name DROP COLUMN field_name; -``` - -### Modify the Data Length - -```sql -ALTER TABLE tb_name MODIFY COLUMN field_name data_type(length); -``` - -### Rename a Column - -```sql -ALTER TABLE tb_name RENAME COLUMN old_col_name new_col_name -``` - -## Modify a Subtable - -```sql -ALTER TABLE [db_name.]tb_name alter_table_clause - -alter_table_clause: { - alter_table_options - | SET TAG tag_name = new_tag_value -} - -alter_table_options: - alter_table_option ... - -alter_table_option: { - TTL value - | COMMENT 'string_value' -} -``` - -**More explanations** -1. 
Only the value of a tag can be modified directly. For all other modifications, you must modify the supertable from which the subtable was created. - -### Change Tag Value Of Sub Table - -``` -ALTER TABLE tb_name SET TAG tag_name=new_tag_value; -``` - -## Delete a Table - -The following SQL statement deletes one or more tables. - -```sql -DROP TABLE [IF EXISTS] [db_name.]tb_name [, [IF EXISTS] [db_name.]tb_name] ... -``` - -## View Tables - -### View All Tables - -The following SQL statement shows all tables in the current database. - -```sql -SHOW TABLES [LIKE tb_name_wildchar]; -``` - -### View the CREATE Statement for a Table - -``` -SHOW CREATE TABLE tb_name; -``` - -This command is useful in migrating data from one TDengine cluster to another because it can be used to create the exact same tables in the target database. - -## View the Table Schema - -``` -DESCRIBE [db_name.]tb_name; -``` \ No newline at end of file diff --git a/docs/en/17-taos-sql/04-stable.md b/docs/en/17-taos-sql/04-stable.md deleted file mode 100644 index b8aed1dbe1..0000000000 --- a/docs/en/17-taos-sql/04-stable.md +++ /dev/null @@ -1,160 +0,0 @@ ---- -sidebar_label: Supertable -title: Supertable -description: Operations about Super Tables. ---- - -## Create a Supertable - -```sql -CREATE STABLE [IF NOT EXISTS] stb_name (create_definition [, create_definitionn] ...) TAGS (create_definition [, create_definition] ...) [table_options] - -create_definition: - col_name column_definition - -column_definition: - type_name [COMMENT 'string_value'] -``` - -**More explanations** -- Each supertable can have a maximum of 4096 columns, including tags. The minimum number of columns is 3: a timestamp column used as the key, one tag column, and one data column. -- When you create a supertable, you can add comments to columns and tags. -- The TAGS keyword defines the tag columns for the supertable. 
The following restrictions apply to tag columns: - - A tag column can use the TIMESTAMP data type, but the values in the column must be fixed numbers. Timestamps including formulae, such as "now + 10s", cannot be stored in a tag column. - - The name of a tag column cannot be the same as the name of any other column. - - The name of a tag column cannot be a reserved keyword. - - Each supertable must contain between 1 and 128 tags. The total length of the TAGS keyword cannot exceed 16 KB. -- For more information about table parameters, see Create a Table. - -## View a Supertable - -### View All Supertables - -``` -SHOW STABLES [LIKE tb_name_wildcard]; -``` - -The preceding SQL statement shows all supertables in the current TDengine database, including the name, creation time, number of columns, number of tags, and number of subtables for each supertable. - -### View the CREATE Statement for a Supertable - -``` -SHOW CREATE STABLE stb_name; -``` - -The preceding SQL statement can be used in migration scenarios. It returns the CREATE statement that was used to create the specified supertable. You can then use the returned statement to create an identical supertable on another TDengine database. - -## View the Supertable Schema - -``` -DESCRIBE [db_name.]stb_name; -``` - -## Drop STable - -``` -DROP STABLE [IF EXISTS] [db_name.]stb_name -``` - -Note: Deleting a supertable will delete all subtables created from the supertable, including all data within those subtables. - -## Modify a Supertable - -```sql -ALTER STABLE [db_name.]tb_name alter_table_clause - -alter_table_clause: { - alter_table_options - | ADD COLUMN col_name column_type - | DROP COLUMN col_name - | MODIFY COLUMN col_name column_type - | ADD TAG tag_name tag_type - | DROP TAG tag_name - | MODIFY TAG tag_name tag_type - | RENAME TAG old_tag_name new_tag_name -} - -alter_table_options: - alter_table_option ... 
- -alter_table_option: { - COMMENT 'string_value' -} - -``` - -**More explanations** - -Modifications to the table schema of a supertable take effect on all subtables within the supertable. You cannot modify the table schema of subtables individually. When you modify the tag schema of a supertable, the modifications automatically take effect on all of its subtables. - -- ADD COLUMN: adds a column to the supertable. -- DROP COLUMN: deletes a column from the supertable. -- MODIFY COLUMN: changes the length of a BINARY or NCHAR column. Note that you can only specify a length greater than the current length. -- ADD TAG: adds a tag to the supertable. -- DROP TAG: deletes a tag from the supertable. When you delete a tag from a supertable, it is automatically deleted from all subtables within the supertable. -- MODIFY TAG: modifies the definition of a tag in the supertable. You can use this keyword to change the length of a BINARY or NCHAR tag column. Note that you can only specify a length greater than the current length. -- RENAME TAG: renames a specified tag in the supertable. When you rename a tag in a supertable, it is automatically renamed in all subtables within the supertable. - -### Add a Column - -``` -ALTER STABLE stb_name ADD COLUMN col_name column_type; -``` - -### Delete a Column - -``` -ALTER STABLE stb_name DROP COLUMN col_name; -``` - -### Modify the Data Length - -``` -ALTER STABLE stb_name MODIFY COLUMN col_name data_type(length); -``` - -The preceding SQL statement changes the length of a BINARY or NCHAR data column. Note that you can only specify a length greater than the current length. - -### Add A Tag - -``` -ALTER STABLE stb_name ADD TAG tag_name tag_type; -``` - -The preceding SQL statement adds a tag of the specified type to the supertable. A supertable cannot contain more than 128 tags. The total length of all tags in a supertable cannot exceed 16 KB. 
- -### Remove A Tag - -``` -ALTER STABLE stb_name DROP TAG tag_name; -``` - -The preceding SQL statement deletes a tag from the supertable. When you delete a tag from a supertable, it is automatically deleted from all subtables within the supertable. - -### Change A Tag - -``` -ALTER STABLE stb_name RENAME TAG old_tag_name new_tag_name; -``` - -The preceding SQL statement renames a tag in the supertable. When you rename a tag in a supertable, it is automatically renamed in all subtables within the supertable. - -### Change Tag Length - -``` -ALTER STABLE stb_name MODIFY TAG tag_name data_type(length); -``` - -The preceding SQL statement changes the length of a BINARY or NCHAR tag column. Note that you can only specify a length greater than the current length. (Available in 2.1.3.0 and later versions) - -### View a Supertable -You can run projection and aggregate SELECT queries on supertables, and you can filter by tag or column by using the WHERE keyword. - -If you do not include an ORDER BY clause, results are returned by subtable. These results are not ordered. You can include an ORDER BY clause in your query to strictly order the results. - - - -:::note -All tag operations except for updating the value of a tag must be performed on the supertable and not on individual subtables. If you add a tag to an existing supertable, the tag is automatically added with a null value to all subtables within the supertable. - -::: diff --git a/docs/en/17-taos-sql/05-insert.md b/docs/en/17-taos-sql/05-insert.md deleted file mode 100644 index e798d1f804..0000000000 --- a/docs/en/17-taos-sql/05-insert.md +++ /dev/null @@ -1,132 +0,0 @@ ---- -sidebar_label: Insert -title: Insert -description: Insert data into TDengine ---- - -## Syntax - -```sql -INSERT INTO - tb_name - [USING stb_name [(tag1_name, ...)] TAGS (tag1_value, ...)] - [(field1_name, ...)] - VALUES (field1_value, ...) [(field1_value2, ...) ...] 
| FILE csv_file_path - [tb2_name - [USING stb_name [(tag1_name, ...)] TAGS (tag1_value, ...)] - [(field1_name, ...)] - VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path - ...]; -``` - -**Timestamps** - -1. All data writes must include a timestamp. With regard to timestamps, note the following: - -2. The precision of a timestamp depends on its format. The precision configured for the database affects only timestamps that are inserted as long integers (UNIX time). Timestamps inserted as date and time strings are not affected. As an example, the timestamp 2021-07-13 16:16:48 is equivalent to 1626164208 in UNIX time. This UNIX time is modified to 1626164208000 for databases with millisecond precision, 1626164208000000 for databases with microsecond precision, and 1626164208000000000 for databases with nanosecond precision. - -3. If you want to insert multiple rows simultaneously, do not use the NOW function in the timestamp. Using the NOW function in this situation will cause multiple rows to have the same timestamp and prevent them from being stored correctly. This is because the NOW function obtains the current time on the client, and multiple instances of NOW in a single statement will return the same time. - The earliest timestamp that you can use when inserting data is equal to the current time on the server minus the value of the KEEP parameter. The latest timestamp that you can use when inserting data is equal to the current time on the server plus the value of the DURATION parameter. You can configure the KEEP and DURATION parameters when you create a database. The default values are 3650 days for the KEEP parameter and 10 days for the DURATION parameter. - -**Syntax** - -1. The USING clause automatically creates the specified subtable if it does not exist. If it's unknown whether the table already exists, the table can be created automatically while inserting using the SQL statement below. 
To use this functionality, a STable must be used as template and tag values must be provided. Any tags that you do not specify will be assigned a null value. - -2. You can insert data into specified columns. Any columns in which you do not insert data will be assigned a null value. - -3. The VALUES clause inserts one or more rows of data into a table. - -4. The FILE clause inserts tags or data from a comma-separated values (CSV) file. Do not include headers in your CSV files. - -5. A single INSERT statement can write data to multiple tables. - -6. The INSERT statement is fully parsed before being executed, so that if any element of the statement fails, the entire statement will fail. For example, the following statement will not create a table because the latter part of the statement is invalid: - - ```sql - INSERT INTO d1001 USING meters TAGS('Beijing.Chaoyang', 2) VALUES('a'); - ``` - -7. However, an INSERT statement that writes data to multiple subtables can succeed for some tables and fail for others. This situation is caused because vnodes perform write operations independently of each other. One vnode failing to write data does not affect the ability of other vnodes to write successfully. - -## Insert a Record - -Single row or multiple rows specified with VALUES can be inserted into a specific table. A single row is inserted using the below statement. - -```sql -INSERT INTO d1001 VALUES (NOW, 10.2, 219, 0.32); -``` - -## Insert Multiple Records - -Two rows are inserted using the below statement. - -```sql -INSERT INTO d1001 VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32) (1626164208000, 10.15, 217, 0.33); -``` - -## Write to a Specified Column - -Data can be inserted into specific columns, either single row or multiple row, while other columns will be inserted as NULL value. The key (timestamp) cannot be null.
For example: - -```sql -INSERT INTO d1001 (ts, current, phase) VALUES ('2021-07-13 14:06:33.196', 10.27, 0.31); -``` - -## Insert Into Multiple Tables - -One or multiple rows can be inserted into multiple tables in a single SQL statement, with or without specifying specific columns. For example: - -```sql -INSERT INTO d1001 VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33) - d1002 (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31); -``` - -## Automatically Create Table When Inserting - -If it's unknown whether the table already exists, the table can be created automatically while inserting using the SQL statement below. To use this functionality, a STable must be used as template and tag values must be provided. For example: - -```sql -INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32); -``` - -It's not necessary to provide values for all tags when creating tables automatically, the tags without values provided will be set to NULL. For example: - -```sql -INSERT INTO d21001 USING meters (groupId) TAGS (2) VALUES ('2021-07-13 14:06:33.196', 10.15, 217, 0.33); -``` - -Multiple rows can also be inserted into the same table in a single SQL statement. For example: - -```sql -INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33) - d21002 USING meters (groupId) TAGS (2) VALUES ('2021-07-13 14:06:34.255', 10.15, 217, 0.33) - d21003 USING meters (groupId) TAGS (2) (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31); -``` - -## Insert Rows From A File - -Besides using `VALUES` to insert one or multiple rows, the data to be inserted can also be prepared in a CSV file with comma as separator and each field value quoted by single quotes. Table definition is not required in the CSV file. 
For example, if file "/tmp/csvfile.csv" contains the below data: - -``` -'2021-07-13 14:07:34.630', '10.2', '219', '0.32' -'2021-07-13 14:07:35.779', '10.15', '217', '0.33' -``` - -Then data in this file can be inserted by the SQL statement below: - -```sql -INSERT INTO d1001 FILE '/tmp/csvfile.csv'; -``` - -## Create Tables Automatically and Insert Rows From File - -```sql -INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/csvfile.csv'; -``` - -When writing data from a file, you can automatically create the specified subtable if it does not exist. For example: - -```sql -INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/csvfile_21001.csv' - d21002 USING meters (groupId) TAGS (2) FILE '/tmp/csvfile_21002.csv'; -``` diff --git a/docs/en/17-taos-sql/06-select.md b/docs/en/17-taos-sql/06-select.md deleted file mode 100644 index 75e0a5dedf..0000000000 --- a/docs/en/17-taos-sql/06-select.md +++ /dev/null @@ -1,414 +0,0 @@ ---- -sidebar_label: Select -title: Select -description: Query Data from TDengine. ---- - -## Syntax - -```sql -SELECT {DATABASE() | CLIENT_VERSION() | SERVER_VERSION() | SERVER_STATUS() | NOW() | TODAY() | TIMEZONE()} - -SELECT [DISTINCT] select_list - from_clause - [WHERE condition] - [PARTITION BY tag_list] - [window_clause] - [group_by_clause] - [order_by_clasue] - [SLIMIT limit_val [SOFFSET offset_val]] - [LIMIT limit_val [OFFSET offset_val]] - [>> export_file] - -select_list: - select_expr [, select_expr] ... - -select_expr: { - * - | query_name.* - | [schema_name.] {table_name | view_name} .* - | t_alias.* - | expr [[AS] c_alias] -} - -from_clause: { - table_reference [, table_reference] ... - | join_clause [, join_clause] ... 
-} - -table_reference: - table_expr t_alias - -table_expr: { - table_name - | view_name - | ( subquery ) -} - -join_clause: - table_reference [INNER] JOIN table_reference ON condition - -window_clause: { - SESSION(ts_col, tol_val) - | STATE_WINDOW(col) - | INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)] - -changes_option: { - DURATION duration_val - | ROWS rows_val -} - -group_by_clause: - GROUP BY expr [, expr] ... HAVING condition - -order_by_clasue: - ORDER BY order_expr [, order_expr] ... - -order_expr: - {expr | position | c_alias} [DESC | ASC] [NULLS FIRST | NULLS LAST] -``` - -## Lists - -A query can be performed on some or all columns. Data and tag columns can all be included in the SELECT list. - -## Wildcards - -You can use an asterisk (\*) as a wildcard character to indicate all columns. For standard tables, the asterisk indicates only data columns. For supertables and subtables, tag columns are also included. - -```sql -SELECT * FROM d1001; -``` - -You can use a table name as a prefix before an asterisk. For example, the following SQL statements both return all columns from the d1001 table: - -```sql -SELECT * FROM d1001; -SELECT d1001.* FROM d1001; -``` - -However, in a JOIN query, using a table name prefix with an asterisk returns different results. In this case, querying * returns all data in all columns in all tables (not including tags), whereas using a table name prefix returns all data in all columns in the specified table only. - -```sql -SELECT * FROM d1001, d1003 WHERE d1001.ts=d1003.ts; -SELECT d1001.* FROM d1001,d1003 WHERE d1001.ts = d1003.ts; -``` - -The first of the preceding SQL statements returns all columns from the d1001 and d1003 tables, but the second of the preceding SQL statements returns all columns from the d1001 table only. - -With regard to the other SQL functions that support wildcards, the differences are as follows: -`count(*)` only returns one column. 
`first`, `last`, and `last_row` return all columns. - -### Tag Columns - -You can query tag columns in supertables and subtables and receive results in the same way as querying data columns. - -```sql -SELECT location, groupid, current FROM d1001 LIMIT 2; -``` - -### Distinct Values - -The DISTINCT keyword returns only values that are different over one or more columns. You can use the DISTINCT keyword with tag columns and data columns. - -The following SQL statement returns distinct values from a tag column: - -```sql -SELECT DISTINCT tag_name [, tag_name ...] FROM stb_name; -``` - -The following SQL statement returns distinct values from a data column: - -```sql -SELECT DISTINCT col_name [, col_name ...] FROM tb_name; -``` - -:::info - -1. Configuration parameter `maxNumOfDistinctRes` in `taos.cfg` is used to control the number of rows to output. The minimum configurable value is 100,000, the maximum configurable value is 100,000,000, the default value is 1,000,000. If the actual number of rows exceeds the value of this parameter, only the number of rows specified by this parameter will be output. -2. It can't be guaranteed that the results selected by using `DISTINCT` on columns of `FLOAT` or `DOUBLE` are exactly unique because of the precision errors in floating point numbers. -3. `DISTINCT` can't be used in the sub-query of a nested query statement, and can't be used together with aggregate functions, `GROUP BY` or `JOIN` in the same SQL statement. - -::: - -### Column Names - -When using `SELECT`, the column names in the result set will be the same as that in the select clause if `AS` is not used. `AS` can be used to rename the column names in the result set. For example: - -```sql -taos> SELECT ts, ts AS primary_key_ts FROM d1001; -``` - -`AS` can't be used together with `first(*)`, `last(*)`, or `last_row(*)`. - -### Pseudocolumns - -**TBNAME** -The TBNAME pseudocolumn in a supertable contains the names of subtables within the supertable. 
- -The following SQL statement returns all unique subtable names and locations within the meters supertable: - -```mysql -SELECT DISTINCT TBNAME, location FROM meters; -``` - -Use the `INS_TAGS` system table in `INFORMATION_SCHEMA` to query the information for subtables in a supertable. For example, the following statement returns the name and tag values for each subtable in the `meters` supertable. - -```mysql -SELECT table_name, tag_name, tag_type, tag_value FROM information_schema.ins_tags WHERE stable_name='meters'; -``` - -The following SQL statement returns the number of subtables within the meters supertable. - -```mysql -SELECT COUNT(*) FROM (SELECT DISTINCT TBNAME FROM meters); -``` - -In the preceding two statements, only tags can be used as filtering conditions in the WHERE clause. For example: - -**\_QSTART and \_QEND** - -The \_QSTART and \_QEND pseudocolumns contain the beginning and end of the time range of a query. If the WHERE clause in a statement does not contain valid timestamps, the time range is equal to [-2^63, 2^63 - 1]. - -The \_QSTART and \_QEND pseudocolumns cannot be used in a WHERE clause. - -**\_WSTART, \_WEND, and \_WDURATION** - -The \_WSTART, \_WEND, and \_WDURATION pseudocolumns indicate the beginning, end, and duration of a window. - -These pseudocolumns can be used only in time window-based aggregations and must occur after the aggregation clause. - -**\_c0 and \_ROWTS** - -In TDengine, the first column of all tables must be a timestamp. This column is the primary key of the table. The \_c0 and \_ROWTS pseudocolumns both represent the values of this column. These pseudocolumns enable greater flexibility and standardization. For example, you can use functions such as MAX and MIN with these pseudocolumns. - -```sql -select _rowts, max(current) from meters; -``` - -## Query Objects - -`FROM` can be followed by a number of tables or super tables, or can be followed by a sub-query. 
-If no database is specified as current database in use, table names must be preceded with database name, for example, `power.d1001`. - -You can perform INNER JOIN statements based on the primary key. The following conditions apply: - -1. You can use FROM table list or an explicit JOIN clause. -2. For standard tables and subtables, you must specify an ON condition and the condition must be equivalent to the primary key. -3. For supertables, the ON condition must be equivalent to the primary key. In addition, the tag columns of the tables on which the INNER JOIN is performed must have a one-to-one relationship. You cannot specify an OR condition. -4. The tables that are included in a JOIN clause must be of the same type (supertable, standard table, or subtable). -5. You can include subqueries before and after the JOIN keyword. -6. You cannot include more than ten tables in a JOIN clause. -7. You cannot include a FILL clause and a JOIN clause in the same statement. - -## GROUP BY - -If you use a GROUP BY clause, the SELECT list can only include the following items: - -1. Constants -2. Aggregate functions -3. Expressions that are consistent with the expression following the GROUP BY clause -4. Expressions that include the preceding expression - -The GROUP BY clause groups each row of data by the value of the expression following the clause and returns a combined result for each group. - -The expressions in a GROUP BY clause can include any column in any table or view. It is not necessary that the expressions appear in the SELECT list. - -The GROUP BY clause does not guarantee that the results are ordered. If you want to ensure that grouped data is ordered, use the ORDER BY clause. - - -## PARTITION BY - -The PARTITION BY clause is a TDengine-specific extension to standard SQL. This clause partitions data based on the part_list and performs computations per partition. - -For more information, see TDengine Extensions. 
- -## ORDER BY - -The ORDER BY keyword orders query results. If you do not include an ORDER BY clause in a query, the order of the results can be inconsistent. - -You can specify integers after ORDER BY to indicate the order in which you want the items in the SELECT list to be displayed. For example, 1 indicates the first item in the select list. - -You can specify ASC for ascending order or DESC for descending order. - -You can also use the NULLS keyword to specify the position of null values. Ascending order uses NULLS LAST by default. Descending order uses NULLS FIRST by default. - -## LIMIT - -The LIMIT keyword controls the number of results that are displayed. You can also use the OFFSET keyword to specify the result to display first. `LIMIT` and `OFFSET` are executed after `ORDER BY` in the query execution. You can include an offset in a LIMIT clause. For example, LIMIT 5 OFFSET 2 can also be written LIMIT 2, 5. Both of these clauses display the third through the seventh results. - -In a statement that includes a PARTITION BY clause, the LIMIT keyword is performed on each partition, not on the entire set of results. - -## SLIMIT - -The SLIMIT keyword is used with a PARTITION BY clause to control the number of partitions that are displayed. You can include an offset in a SLIMIT clause. For example, SLIMIT 5 OFFSET 2 can also be written SLIMIT 2, 5. Both of these clauses display the third through the seventh partitions. - -Note: If you include an ORDER BY clause, only one partition can be displayed. - -## Special Query - -Some special query functions can be invoked without `FROM` sub-clause. - -## Obtain Current Database - -The following SQL statement returns the current database. If a database has not been specified on login or with the `USE` command, a null value is returned.
- -```sql -SELECT DATABASE(); -``` - -### Obtain Current Version - -```sql -SELECT CLIENT_VERSION(); -SELECT SERVER_VERSION(); -``` - -## Obtain Server Status - -The following SQL statement returns the status of the TDengine server. An integer indicates that the server is running normally. An error code indicates that an error has occurred. This statement can also detect whether a connection pool or third-party tool is connected to TDengine properly. By using this statement, you can ensure that connections in a pool are not lost due to an incorrect heartbeat detection statement. - -```sql -SELECT SERVER_STATUS(); -``` - -### Obtain Current Time - -```sql -SELECT NOW(); -``` - -### Obtain Current Date - -```sql -SELECT TODAY(); -``` - -### Obtain Current Time Zone - -```sql -SELECT TIMEZONE(); -``` - -## Regular Expression - -### Syntax - -```txt -WHERE (column|tbname) **match/MATCH/nmatch/NMATCH** _regex_ -``` - -### Specification - -TDengine supports POSIX regular expression syntax. For more information, see [Regular Expressions](https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap09.html). - -### Restrictions - -Regular expression filtering is supported only on table names (TBNAME), BINARY tags, and NCHAR tags. Regular expression filtering cannot be performed on data columns. - -A regular expression string cannot exceed 128 bytes. You can configure this value by modifying the maxRegexStringLen parameter on the TDengine Client. The modified value takes effect when the client is restarted. - -## JOIN - -TDengine supports natural joins between supertables, between standard tables, and between subqueries. The difference between natural joins and inner joins is that natural joins require that the fields being joined in the supertables or standard tables must have the same name. Data or tag columns must be joined with the equivalent column in another table. - -For standard tables, only the timestamp (primary key) can be used in join operations. 
For example: - -```sql -SELECT * -FROM temp_tb_1 t1, pressure_tb_1 t2 -WHERE t1.ts = t2.ts -``` - -For supertables, tags as well as timestamps can be used in join operations. For example: - -```sql -SELECT * -FROM temp_stable t1, temp_stable t2 -WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0; -``` - -Similarly, join operations can be performed on the result sets of multiple subqueries. - -:::note - -The following restrictions apply to JOIN statements: - -- The number of tables or supertables in a single join operation cannot exceed 10. -- `FILL` cannot be used in a JOIN statement. -- Arithmetic operations cannot be performed on the result sets of join operation. -- `GROUP BY` is not allowed on a segment of the tables that participate in a join operation. -- `OR` cannot be used in the conditions for join operations. -- Join operation can be performed only on tags or timestamps. You cannot perform a join operation on data columns. - -::: - -## Nested Query - -Nested query is also called sub query. This means that in a single SQL statement the result of inner query can be used as the data source of the outer query. - -From 2.2.0.0, unassociated sub query can be used in the `FROM` clause. Unassociated means the sub query doesn't use the parameters in the parent query. More specifically, in the `tb_name_list` of `SELECT` statement, an independent SELECT statement can be used. So a complete nested query looks like: - -``` -SELECT ... FROM (SELECT ... FROM ...) ...; -``` - -:::info - -- Only one layer of nesting is allowed, that means no sub query is allowed within a sub query -- The result set returned by the inner query will be used as a "virtual table" by the outer query. The "virtual table" can be renamed using `AS` keyword for easy reference in the outer query. -- Sub query is not allowed in continuous query. -- JOIN operation is allowed between tables/STables inside both inner and outer queries.
Join operation can be performed on the result set of the inner query. -- UNION operation is not allowed in either inner query or outer query. -- The functions that can be used in the inner query are the same as those that can be used in a non-nested query. - - `ORDER BY` inside the inner query is unnecessary and will slow down the query performance significantly. It is best to avoid the use of `ORDER BY` inside the inner query. -- Compared to the non-nested query, the functionality that can be used in the outer query has the following restrictions: - - Functions - - If the result set returned by the inner query doesn't contain timestamp column, then functions relying on timestamp can't be used in the outer query, like `TOP`, `BOTTOM`, `FIRST`, `LAST`, `DIFF`. - - Functions that need to scan the data twice can't be used in the outer query, like `STDDEV`, `PERCENTILE`. - - `IN` operator is not allowed in the outer query but can be used in the inner query. - - `GROUP BY` is not supported in the outer query. - -::: - -## UNION ALL - -```txt title=Syntax -SELECT ... -UNION ALL SELECT ... -[UNION ALL SELECT ...] -``` - -TDengine supports the `UNION ALL` operation. `UNION ALL` operator can be used to combine the result set from multiple select statements as long as the result set of these select statements have exactly the same columns. `UNION ALL` doesn't remove redundant rows from multiple result sets. In a single SQL statement, at most 100 `UNION ALL` can be supported. 
- -### Examples - -table `tb1` is created using below SQL statement: - -``` -CREATE TABLE tb1 (ts TIMESTAMP, col1 INT, col2 FLOAT, col3 BINARY(50)); -``` - -The rows in the past one hour in `tb1` can be selected using below SQL statement: - -``` -SELECT * FROM tb1 WHERE ts >= NOW - 1h; -``` - -The rows between 2018-06-01 08:00:00.000 and 2018-06-02 08:00:00.000 and col3 ends with 'nny' can be selected in the descending order of timestamp using below SQL statement: - -``` -SELECT * FROM tb1 WHERE ts > '2018-06-01 08:00:00.000' AND ts <= '2018-06-02 08:00:00.000' AND col3 LIKE '%nny' ORDER BY ts DESC; -``` - -The sum of col1 and col2 for rows later than 2018-06-01 08:00:00.000 and whose col2 is bigger than 1.2 can be selected and renamed as "complex", while only 10 rows are output from the 5th row, by below SQL statement: - -``` -SELECT (col1 + col2) AS 'complex' FROM tb1 WHERE ts > '2018-06-01 08:00:00.000' AND col2 > 1.2 LIMIT 10 OFFSET 5; -``` - -The rows in the past 10 minutes and whose col2 is bigger than 3.14 are selected and output to the result file `/home/testoutput.csv` with below SQL statement: - -``` -SELECT COUNT(*) FROM tb1 WHERE ts >= NOW - 10m AND col2 > 3.14 >> /home/testoutput.csv; -``` diff --git a/docs/en/17-taos-sql/08-delete-data.mdx b/docs/en/17-taos-sql/08-delete-data.mdx deleted file mode 100644 index 999c467ad0..0000000000 --- a/docs/en/17-taos-sql/08-delete-data.mdx +++ /dev/null @@ -1,41 +0,0 @@ ---- -sidebar_label: Delete Data -description: "Delete data from table or Stable" -title: Delete Data ---- - -TDengine provides the functionality of deleting data from a table or STable according to specified time range, it can be used to cleanup abnormal data generated due to device failure. - -**Syntax:** - -```sql -DELETE FROM [ db_name. 
] tb_name [WHERE condition]; -``` - -**Description:** Delete data from a table or STable - -**Parameters:** - -- `db_name`: Optional parameter, specifies the database in which the table exists; if not specified, the current database will be used. -- `tb_name`: Mandatory parameter, specifies the table name from which data will be deleted, it can be a normal table, subtable or STable. -- `condition`: Optional parameter, specifies the data filter condition. If no condition is specified all data will be deleted, so please be cautious when deleting data without any condition. The condition used here is only applicable to the first column, i.e. the timestamp column. - -**More Explanations:** - -The data can't be recovered once deleted, so please be cautious to use the functionality of deleting data. It's better to first verify the data to be deleted using `select` before executing `delete`. - -**Example:** - -`meters` is a STable, in which `groupid` is a tag column of int type. Now we want to delete the data older than 2021-10-01 10:40:00.100. You can perform this action by running the following SQL statement: - -```sql -delete from meters where ts < '2021-10-01 10:40:00.100' ; -``` - -The output is: - -``` -Deleted 102000 row(s) from 1020 table(s) (0.421950s) -``` - -It means totally 102,000 rows of data have been deleted from 1,020 sub tables. diff --git a/docs/en/17-taos-sql/10-function.md b/docs/en/17-taos-sql/10-function.md deleted file mode 100644 index c8af2d32e7..0000000000 --- a/docs/en/17-taos-sql/10-function.md +++ /dev/null @@ -1,1238 +0,0 @@ ---- -sidebar_label: Functions -title: Functions -toc_max_heading_level: 4 -description: TDengine Built-in Functions. ---- - -## Single Row Functions - -Single row functions return a result for each row. - -### Mathematical Functions - -#### ABS - -```sql -SELECT ABS(field_name) FROM { tb_name | stb_name } [WHERE clause] -``` - -**Description**: The absolute value of a specific field.
- -**Return value type**: Same as the field being used - -**Applicable data types**: Numeric - -**Nested query**: It can be used in both the outer query and inner query in a nested query. - -**Applicable table types**: standard tables and supertables - -**Usage**: This function can only be used on data columns. It can be used with selection and projection functions but not with aggregation functions. - -#### ACOS - -```sql -SELECT ACOS(field_name) FROM { tb_name | stb_name } [WHERE clause] -``` - -**Description**: The arc cosine of a specific field. - -**Return value type**: Double - -**Applicable data types**: Numeric - -**Nested query**: It can be used in both the outer query and inner query in a nested query. - -**Applicable table types**: standard tables and supertables - -**Usage**: This function can only be used on data columns. It can be used with selection and projection functions but not with aggregation functions. - -#### ASIN - -```sql -SELECT ASIN(field_name) FROM { tb_name | stb_name } [WHERE clause] -``` - -**Description**: The arc sine of a specific field. - -**Return value type**: Double - -**Applicable data types**: Numeric - -**Nested query**: It can be used in both the outer query and inner query in a nested query. - -**Applicable table types**: standard tables and supertables - -**Usage**: This function can only be used on data columns. It can be used with selection and projection functions but not with aggregation functions. - - -#### ATAN - -```sql -SELECT ATAN(field_name) FROM { tb_name | stb_name } [WHERE clause] -``` - -**Description**: The arc tangent of a specific field. - -**Return value type**: Double - -**Applicable data types**: Numeric - -**Nested query**: It can be used in both the outer query and inner query in a nested query. - -**Applicable table types**: standard tables and supertables - -**Usage**: This function can only be used on data columns. 
It can be used with selection and projection functions but not with aggregation functions. - - -#### CEIL - -```sql -SELECT CEIL(field_name) FROM { tb_name | stb_name } [WHERE clause]; -``` - -**Description**: The rounded up value of a specific field - -**Return value type**: Same as the field being used - -**Applicable data types**: Numeric - -**Applicable table types**: standard tables and supertables - -**Nested query**: It can be used in both the outer query and inner query in a nested query. - -**Usage**: This function can only be used on data columns. It can be used with selection and projection functions but not with aggregation functions. - -#### COS - -```sql -SELECT COS(field_name) FROM { tb_name | stb_name } [WHERE clause] -``` - -**Description**: The cosine of a specific field. - -**Return value type**: Double - -**Applicable data types**: Numeric - -**Nested query**: It can be used in both the outer query and inner query in a nested query. - -**Applicable table types**: standard tables and supertables - -**Usage**: This function can only be used on data columns. It can be used with selection and projection functions but not with aggregation functions. - -#### FLOOR - -```sql -SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause]; -``` - -**Description**: The rounded down value of a specific field - **More explanations**: The restrictions are same as those of the `CEIL` function. - -#### LOG - -```sql -SELECT LOG(field_name[, base]) FROM { tb_name | stb_name } [WHERE clause] -``` - -**Description**: The logarithm of a specific field with `base` as the radix. If you do not enter a base, the natural logarithm of the field is returned. - -**Return value type**: Double - -**Applicable data types**: Numeric - -**Nested query**: It can be used in both the outer query and inner query in a nested query. - -**Applicable table types**: standard tables and supertables - -**Usage**: This function can only be used on data columns. 
It can be used with selection and projection functions but not with aggregation functions. - - -#### POW - -```sql -SELECT POW(field_name, power) FROM { tb_name | stb_name } [WHERE clause] -``` - -**Description**: The power of a specific field with `power` as the exponent. - -**Return value type**: Double - -**Applicable data types**: Numeric - -**Nested query**: It can be used in both the outer query and inner query in a nested query. - -**Applicable table types**: standard tables and supertables - -**Usage**: This function can only be used on data columns. It can be used with selection and projection functions but not with aggregation functions. - - -#### ROUND - -```sql -SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause]; -``` - -**Description**: The rounded value of a specific field. - **More explanations**: The restrictions are same as those of the `CEIL` function. - - -#### SIN - -```sql -SELECT SIN(field_name) FROM { tb_name | stb_name } [WHERE clause] -``` - -**Description**: The sine of a specific field. - -**Return value type**: Double - -**Applicable data types**: Numeric - -**Nested query**: It can be used in both the outer query and inner query in a nested query. - -**Applicable table types**: standard tables and supertables - -**Usage**: This function can only be used on data columns. It can be used with selection and projection functions but not with aggregation functions. - -#### SQRT - -```sql -SELECT SQRT(field_name) FROM { tb_name | stb_name } [WHERE clause] -``` - -**Description**: The square root of a specific field. - -**Return value type**: Double - -**Applicable data types**: Numeric - -**Nested query**: It can be used in both the outer query and inner query in a nested query. - -**Applicable table types**: standard tables and supertables - -**Usage**: This function can only be used on data columns. It can be used with selection and projection functions but not with aggregation functions. 
- -#### TAN - -```sql -SELECT TAN(field_name) FROM { tb_name | stb_name } [WHERE clause] -``` - -**Description**: The tangent of a specific field. - -**Return value type**: Double - -**Applicable data types**: Numeric - -**Nested query**: It can be used in both the outer query and inner query in a nested query. - -**Applicable table types**: standard tables and supertables - -**Usage**: This function can only be used on data columns. It can be used with selection and projection functions but not with aggregation functions. - -### Concatenation Functions - -Concatenation functions take strings as input and produce string or numeric values as output. - -#### CHAR_LENGTH - -```sql -SELECT CHAR_LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause] -``` - -**Description**: The length in number of characters of a string - -**Return value type**: Bigint - -**Applicable data types**: VARCHAR and NCHAR - -**Nested query**: It can be used in both the outer query and inner query in a nested query. - -**Applicable table types**: standard tables and supertables - -#### CONCAT - -```sql -SELECT CONCAT(str1|column1, str2|column2, ...) FROM { tb_name | stb_name } [WHERE clause] -``` - -**Description**: The concatenation result of two or more strings - -**Return value type**: If the concatenated strings are VARCHARs, the result is a VARCHAR. If the concatenated strings are NCHARs, the result is an NCHAR. If an input value is null, the result is null. - -**Applicable data types**: VARCHAR and NCHAR You can concatenate between 2 and 8 strings. - -**Nested query**: It can be used in both the outer query and inner query in a nested query. - -**Applicable table types**: standard tables and supertables - - -#### CONCAT_WS - -```sql -SELECT CONCAT_WS(separator, str1|column1, str2|column2, ...) 
FROM { tb_name | stb_name } [WHERE clause] -``` - -**Description**: The concatenation result of two or more strings with separator - -**Return value type**: If the concatenated strings are VARCHARs, the result is a VARCHAR. If the concatenated strings are NCHARs, the result is an NCHAR. If an input value is null, the result is null. - -**Applicable data types**: VARCHAR and NCHAR You can concatenate between 3 and 9 strings. - -**Nested query**: It can be used in both the outer query and inner query in a nested query. - -**Applicable table types**: standard tables and supertables - - -#### LENGTH - -```sql -SELECT LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause] -``` - -**Description**: The length in bytes of a string - -**Return value type**: Bigint - -**Applicable data types**: VARCHAR and NCHAR fields or columns - -**Nested query**: It can be used in both the outer query and inner query in a nested query. - -**Applicable table types**: standard tables and supertables - - -#### LOWER - -```sql -SELECT LOWER(str|column) FROM { tb_name | stb_name } [WHERE clause] -``` - -**Description**: Convert the input string to lower case - -**Return value type**: Same as input - -**Applicable data types**: VARCHAR and NCHAR - -**Nested query**: It can be used in both the outer query and inner query in a nested query. - -**Applicable table types**: standard tables and supertables - - -#### LTRIM - -```sql -SELECT LTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause] -``` - -**Description**: Remove the left leading blanks of a string - -**Return value type**: Same as input - -**Applicable data types**: VARCHAR and NCHAR - -**Nested query**: It can be used in both the outer query and inner query in a nested query. 
- -**Applicable table types**: standard tables and supertables - - -#### RTRIM - -```sql -SELECT LTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause] -``` - -**Description**: Remove the right tailing blanks of a string - -**Return value type**: Same as input - -**Applicable data types**: VARCHAR and NCHAR - -**Nested query**: It can be used in both the outer query and inner query in a nested query. - -**Applicable table types**: standard tables and supertables - - -#### SUBSTR - -```sql -SELECT SUBSTR(str,pos[,len]) FROM { tb_name | stb_name } [WHERE clause] -``` - -**Description**: The sub-string starting from `pos` with length of `len` from the original string `str` - If `len` is not specified, it means from `pos` to the end. - -**Return value type**: Same as input - -**Applicable data types**: VARCHAR and NCHAR Parameter `pos` can be an positive or negative integer; If it's positive, the starting position will be counted from the beginning of the string; if it's negative, the starting position will be counted from the end of the string. - -**Nested query**: It can be used in both the outer query and inner query in a nested query. - -**Applicable table types**: table, STable - - -#### UPPER - -```sql -SELECT UPPER(str|column) FROM { tb_name | stb_name } [WHERE clause] -``` - -**Description**: Convert the input string to upper case - -**Return value type**: Same as input - -**Applicable data types**: VARCHAR and NCHAR - -**Nested query**: It can be used in both the outer query and inner query in a nested query. - -**Applicable table types**: table, STable - - -### Conversion Functions - -Conversion functions change the data type of a value. - -#### CAST - -```sql -SELECT CAST(expression AS type_name) FROM { tb_name | stb_name } [WHERE clause] -``` - -**Description**: Convert the input data `expression` into the type specified by `type_name`. This function can be used only in SELECT statements. 
- -**Return value type**: The type specified by parameter `type_name` - -**Applicable data types**: All data types except JSON - -**Nested query**: It can be used in both the outer query and inner query in a nested query. - -**Applicable table types**: standard tables and supertables - -**More explanations**: - -- Error will be reported for unsupported type casting -- Some values of some supported data types may not be casted, below are known issues: - 1. Some strings cannot be converted to numeric values. For example, the string `a` may be converted to `0`. However, this does not produce an error. - 2. If a converted numeric value is larger than the maximum size for the specified type, an overflow will occur. However, this does not produce an error. - 3. If a converted string value is larger than the maximum size for the specified type, the output value will be truncated. However, this does not produce an error. - -#### TO_ISO8601 - -```sql -SELECT TO_ISO8601(ts[, timezone]) FROM { tb_name | stb_name } [WHERE clause]; -``` - -**Description**: The ISO8601 date/time format converted from a UNIX timestamp, plus the timezone. You can specify any time zone with the timezone parameter. If you do not enter this parameter, the time zone on the client is used. - -**Return value type**: VARCHAR - -**Applicable data types**: Integers and timestamps - -**Nested query**: It can be used in both the outer query and inner query in a nested query. - -**Applicable table types**: standard tables and supertables - -**More explanations**: - -- You can specify a time zone in the following format: [z/Z, +/-hhmm, +/-hh, +/-hh:mm]。 For example, TO_ISO8601(1, "+00:00"). 
-- If the input is a UNIX timestamp, the precision of the returned value is determined by the digits of the input timestamp -- If the input is a column of TIMESTAMP type, the precision of the returned value is same as the precision set for the current data base in use - - -#### TO_JSON - -```sql -SELECT TO_JSON(str_literal) FROM { tb_name | stb_name } [WHERE clause]; -``` - -**Description**: Converts a string into JSON. - -**Return value type**: JSON - -**Applicable data types**: JSON strings in the form `{"literal": literal}`. `{}` indicates a null value. The key must be a string literal. The value can be a numeric literal, string literal, Boolean literal, or null literal. str_literal cannot include escape characters. - -**Nested query**: It can be used in both the outer query and inner query in a nested query. - -**Applicable table types**: table, STable - - -#### TO_UNIXTIMESTAMP - -```sql -SELECT TO_UNIXTIMESTAMP(datetime_string) FROM { tb_name | stb_name } [WHERE clause]; -``` - -**Description**: UNIX timestamp converted from a string of date/time format - -**Return value type**: BIGINT - -**Applicable column types**: VARCHAR and NCHAR - -**Nested query**: It can be used in both the outer query and inner query in a nested query. - -**Applicable table types**: standard tables and supertables - -**More explanations**: - -- The input string must be compatible with ISO8601/RFC3339 standard, NULL will be returned if the string can't be converted -- The precision of the returned timestamp is same as the precision set for the current data base in use - - -### Time and Date Functions - -These functions perform operations on times and dates. - -All functions that return the current time, such as `NOW`, `TODAY`, and `TIMEZONE`, are calculated only once per statement even if they appear multiple times. 
- -#### NOW - -```sql -SELECT NOW() FROM { tb_name | stb_name } [WHERE clause]; -SELECT select_expr FROM { tb_name | stb_name } WHERE ts_col cond_operator NOW(); -INSERT INTO tb_name VALUES (NOW(), ...); -``` - -**Description**: The current time of the client side system - -**Return value type**: TIMESTAMP - -**Applicable column types**: TIMESTAMP only - -**Applicable table types**: standard tables and supertables - -**Nested query**: It can be used in both the outer query and inner query in a nested query. - -**More explanations**: - -- Add and Subtract operation can be performed, for example NOW() + 1s, the time unit can be: - b(nanosecond), u(microsecond), a(millisecond)), s(second), m(minute), h(hour), d(day), w(week) -- The precision of the returned timestamp is same as the precision set for the current data base in use - - -#### TIMEDIFF - -```sql -SELECT TIMEDIFF(ts | datetime_string1, ts | datetime_string2 [, time_unit]) FROM { tb_name | stb_name } [WHERE clause]; -``` - -**Description**: The difference between two timestamps, and rounded to the time unit specified by `time_unit` - -**Return value type**: BIGINT - -**Applicable column types**: UNIX-style timestamps in BIGINT and TIMESTAMP format and other timestamps in VARCHAR and NCHAR format - -**Applicable table types**: standard tables and supertables - -**Nested query**: It can be used in both the outer query and inner query in a nested query. - -**More explanations**: -- Time unit specified by `time_unit` can be: - 1b (nanoseconds), 1u (microseconds), 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), or 1w (weeks) -- The precision of the returned timestamp is same as the precision set for the current data base in use -- If the input data is not formatted as a timestamp, the returned value is null. 
- - -#### TIMETRUNCATE - -```sql -SELECT TIMETRUNCATE(ts | datetime_string , time_unit) FROM { tb_name | stb_name } [WHERE clause]; -``` - -**Description**: Truncate the input timestamp with unit specified by `time_unit` - -**Return value type**: TIMESTAMP - -**Applicable column types**: UNIX-style timestamps in BIGINT and TIMESTAMP format and other timestamps in VARCHAR and NCHAR format - -**Applicable table types**: standard tables and supertables - -**More explanations**: -- Time unit specified by `time_unit` can be: - 1b (nanoseconds), 1u (microseconds), 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), or 1w (weeks) -- The precision of the returned timestamp is same as the precision set for the current data base in use -- If the input data is not formatted as a timestamp, the returned value is null. - - -#### TIMEZONE - -```sql -SELECT TIMEZONE() FROM { tb_name | stb_name } [WHERE clause]; -``` - -**Description**: The timezone of the client side system - -**Applicable data types**: VARCHAR - -**Applicable column types**: None - -**Applicable table types**: standard tables and supertables - - -#### TODAY - -```sql -SELECT TODAY() FROM { tb_name | stb_name } [WHERE clause]; -SELECT select_expr FROM { tb_name | stb_name } WHERE ts_col cond_operator TODAY()]; -INSERT INTO tb_name VALUES (TODAY(), ...); -``` - -**Description**: The timestamp of 00:00:00 of the client side system - -**Return value type**: TIMESTAMP - -**Applicable column types**: TIMESTAMP only - -**Applicable table types**: standard tables and supertables - -**More explanations**: - -- Add and Subtract operation can be performed, for example TODAY() + 1s, the time unit can be: - b(nanosecond), u(microsecond), a(millisecond)), s(second), m(minute), h(hour), d(day), w(week) -- The precision of the returned timestamp is same as the precision set for the current data base in use - - -## Aggregate Functions - -Aggregate functions return one row per group. 
You can use windows or GROUP BY to group data. Otherwise, the entire query is considered a single group. - -TDengine supports the following aggregate functions: - -### APERCENTILE - -```sql -SELECT APERCENTILE(field_name, P[, algo_type]) FROM { tb_name | stb_name } [WHERE clause] -``` - -**Description**: Similar to `PERCENTILE`, but a simulated result is returned - -**Return value type**: DOUBLE - -**Applicable data types**: Numeric - -**Applicable table types**: standard tables and supertables - -**Explanations**: -- _P_ is in range [0,100], when _P_ is 0, the result is same as using function MIN; when _P_ is 100, the result is same as function MAX. -- `algo_type` can only be input as `default` or `t-digest` Enter `default` to use a histogram-based algorithm. Enter `t-digest` to use the t-digest algorithm to calculate the approximation of the quantile. `default` is used by default. - -### AVG - -```sql -SELECT AVG(field_name) FROM tb_name [WHERE clause]; -``` - -**Description**: The average value of the specified fields. - -**Return value type**: DOUBLE - -**Applicable data types**: Numeric - -**Applicable table types**: standard tables and supertables - - -### COUNT - -```sql -SELECT COUNT([*|field_name]) FROM tb_name [WHERE clause]; -``` - -**Description**: The number of records in the specified fields. - -**Return value type**: BIGINT - -**Applicable data types**: Numeric - -**Applicable table types**: standard tables and supertables - -**More explanation**: - -- Wildcard (\*) is used to represent all columns. -If you input a specific column, the number of non-null values in the column is returned. - - -### ELAPSED - -```sql -SELECT ELAPSED(ts_primary_key [, time_unit]) FROM { tb_name | stb_name } [WHERE clause] [INTERVAL(interval [, offset]) [SLIDING sliding]]; -``` - -**Description**:`elapsed` function can be used to calculate the continuous time length in which there is valid data. 
If it's used with `INTERVAL` clause, the returned result is the calculated time length within each time window. If it's used without `INTERVAL` caluse, the returned result is the calculated time length within the specified time range. Please be noted that the return value of `elapsed` is the number of `time_unit` in the calculated time length. - -**Return value type**: Double if the input value is not NULL; - -**Return value type**: TIMESTAMP - -**Applicable tables**: table, STable, outer in nested query - -**Explanations**: -- `field_name` parameter can only be the first column of a table, i.e. timestamp primary key. -- The minimum value of `time_unit` is the time precision of the database. If `time_unit` is not specified, the time precision of the database is used as the default time unit. Time unit specified by `time_unit` can be: - 1b (nanoseconds), 1u (microseconds), 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), or 1w (weeks) -- It can be used with `INTERVAL` to get the time valid time length of each time window. Please be noted that the return value is same as the time window for all time windows except for the first and the last time window. -- `order by asc/desc` has no effect on the result. -- `group by tbname` must be used together when `elapsed` is used against a STable. -- `group by` must NOT be used together when `elapsed` is used against a table or sub table. -- When used in nested query, it's only applicable when the inner query outputs an implicit timestamp column as the primary key. For example, `select elapsed(ts) from (select diff(value) from sub1)` is legal usage while `select elapsed(ts) from (select * from sub1)` is not. In addition, because elapsed has a strict dependency on the timeline, a statement like `select elapsed(ts) from (select diff(value) from st group by tbname) will return a meaningless result. -- It can't be used with `leastsquares`, `diff`, `derivative`, `top`, `bottom`, `last_row`, `interp`. 
- - -### LEASTSQUARES - -```sql -SELECT LEASTSQUARES(field_name, start_val, step_val) FROM tb_name [WHERE clause]; -``` - -**Description**: The linear regression function of the specified column and the timestamp column (primary key), `start_val` is the initial value and `step_val` is the step value. - -**Return value type**: A string in the format of "(slope, intercept)" - -**Applicable data types**: Numeric - -**Applicable table types**: table only - - -### SPREAD - -```sql -SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause]; -``` - -**Description**: The difference between the max and the min of a specific column - -**Return value type**: DOUBLE - -**Applicable data types**: Integers and timestamps - -**Applicable table types**: standard tables and supertables - - -### STDDEV - -```sql -SELECT STDDEV(field_name) FROM tb_name [WHERE clause]; -``` - -**Description**: Standard deviation of a specific column in a table or STable - -**Return value type**: DOUBLE - -**Applicable data types**: Numeric - -**Applicable table types**: standard tables and supertables - - -### SUM - -```sql -SELECT SUM(field_name) FROM tb_name [WHERE clause]; -``` - -**Description**: The sum of a specific column in a table or STable - -**Return value type**: DOUBLE or BIGINT - -**Applicable data types**: Numeric - -**Applicable table types**: standard tables and supertables - - -### HYPERLOGLOG - -```sql -SELECT HYPERLOGLOG(field_name) FROM { tb_name | stb_name } [WHERE clause]; -``` - -**Description**: - The cardinal number of a specific column is returned by using hyperloglog algorithm. The benefit of using hyperloglog algorithm is that the memory usage is under control when the data volume is huge. - However, when the data volume is very small, the result may be not accurate, it's recommended to use `select count(data) from (select unique(col) as data from table)` in this case. 
- -**Return value type**: Integer - -**Applicable data types**: Numeric - -**Applicable table types**: standard tables and supertables - - -### HISTOGRAM - -```sql -SELECT HISTOGRAM(field_name,bin_type, bin_description, normalized) FROM tb_name [WHERE clause]; -``` - -**Description**:Returns count of data points in user-specified ranges. - -**Return value type** If normalized is set to 1, a DOUBLE is returned; otherwise a BIGINT is returned - -**Applicable data types**: Numeric - -**Applicable table types**: table, STable - -**Explanations**: -- bin_type: parameter to indicate the bucket type, valid inputs are: "user_input", "linear_bin", "log_bin"。 -- bin_description: parameter to describe how to generate buckets,can be in the following JSON formats for each bin_type respectively: - - "user_input": "[1, 3, 5, 7]": - User specified bin values. - - - "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}" - "start" - bin starting point. "width" - bin offset. "count" - number of bins generated. "infinity" - whether to add(-inf, inf)as start/end point in generated set of bins. - The above "linear_bin" descriptor generates a set of bins: [-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf]. - - - "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}" - "start" - bin starting point. "factor" - exponential factor of bin offset. "count" - number of bins generated. "infinity" - whether to add(-inf, inf)as start/end point in generated range of bins. - The above "linear_bin" descriptor generates a set of bins: [-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf]. -- normalized: setting to 1/0 to turn on/off result normalization. Valid values are 0 or 1. - - -### PERCENTILE - -```sql -SELECT PERCENTILE(field_name, P) FROM { tb_name } [WHERE clause]; -``` - -**Description**: The value whose rank in a specific column matches the specified percentage. If such a value matching the specified percentage doesn't exist in the column, an interpolation value will be returned. 
- -**Return value type**: DOUBLE - -**Applicable column types**: Numeric - -**Applicable table types**: table only - -**More explanations**: _P_ is in range [0,100], when _P_ is 0, the result is same as using function MIN; when _P_ is 100, the result is same as function MAX. - - -## Selection Functions - -Selection functions return one or more results depending. You can specify the timestamp column, tbname pseudocolumn, or tag columns to show which rows contain the selected value. - -### BOTTOM - -```sql -SELECT BOTTOM(field_name, K) FROM { tb_name | stb_name } [WHERE clause]; -``` - -**Description**: The least _k_ values of a specific column in a table or STable. If a value has multiple occurrences in the column but counting all of them in will exceed the upper limit _k_, then a part of them will be returned randomly. - -**Return value type**:Same as the data type of the column being operated upon - -**Applicable data types**: Numeric - -**Applicable table types**: standard tables and supertables - -**More explanation**: - -- _k_ must be in range [1,100] -- The timestamp associated with the selected values are returned too -- Can't be used with `FILL` - -### FIRST - -```sql -SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause]; -``` - -**Description**: The first non-null value of a specific column in a table or STable - -**Return value type**:Same as the data type of the column being operated upon - -**Applicable data types**: Numeric - -**Applicable table types**: standard tables and supertables - -**More explanation**: - -- FIRST(\*) can be used to get the first non-null value of all columns -- NULL will be returned if all the values of the specified column are all NULL -- A result will NOT be returned if all the columns in the result set are all NULL - -### INTERP - -```sql -SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] RANGE(timestamp1,timestamp2) EVERY(interval) FILL({ VALUE | PREV | NULL | LINEAR | NEXT}); -``` 
- -**Description**: The value that matches the specified timestamp range is returned, if existing; or an interpolation value is returned. - -**Return value type**: Same as the column being operated upon - -**Applicable data types**: Numeric - -**Applicable table types**: standard tables and supertables - -**More explanations** - -- `INTERP` is used to get the value that matches the specified time slice from a column. If no such value exists an interpolation value will be returned based on `FILL` parameter. -- The input data of `INTERP` is the value of the specified column and a `where` clause can be used to filter the original data. If no `where` condition is specified then all original data is the input. -- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1<=timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified. -- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY`. Starting from timestamp1, one interpolation is performed for every time interval specified `EVERY` parameter. -- Interpolation is performed based on `FILL` parameter. -- `INTERP` can only be used to interpolate in single timeline. So it must be used with `partition by tbname` when it's used on a STable. 
- -### LAST - -```sql -SELECT LAST(field_name) FROM { tb_name | stb_name } [WHERE clause]; -``` - -**Description**: The last non-NULL value of a specific column in a table or STable - -**Return value type**:Same as the data type of the column being operated upon - -**Applicable data types**: Numeric - -**Applicable table types**: standard tables and supertables - -**More explanation**: - -- LAST(\*) can be used to get the last non-NULL value of all columns -- If the values of a column in the result set are all NULL, NULL is returned for that column; if all columns in the result are all NULL, no result will be returned. -- When it's used on a STable, if there are multiple values with the timestamp in the result set, one of them will be returned randomly and it's not guaranteed that the same value is returned if the same query is run multiple times. - - -### LAST_ROW - -```sql -SELECT LAST_ROW(field_name) FROM { tb_name | stb_name }; -``` - -**Description**: The last row of a table or STable - -**Return value type**:Same as the data type of the column being operated upon - -**Applicable data types**: Numeric - -**Applicable table types**: standard tables and supertables - -**More explanations**: - -- When it's used on a STable, if there are multiple values with the timestamp in the result set, one of them will be returned randomly and it's not guaranteed that the same value is returned if the same query is run multiple times. -- Can't be used with `INTERVAL`. 
- -### MAX - -```sql -SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause]; -``` - -**Description**: The maximum value of a specific column of a table or STable - -**Return value type**:Same as the data type of the column being operated upon - -**Applicable data types**: Numeric - -**Applicable table types**: standard tables and supertables - - -### MIN - -```sql -SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause]; -``` - -**Description**: The minimum value of a specific column in a table or STable - -**Return value type**:Same as the data type of the column being operated upon - -**Applicable data types**: Numeric - -**Applicable table types**: standard tables and supertables - - -### MODE - -```sql -SELECT MODE(field_name) FROM tb_name [WHERE clause]; -``` - -**Description**:The value which has the highest frequency of occurrence. NULL is returned if there are multiple values which have highest frequency of occurrence. - -**Return value type**: Same as the input data - -**Applicable data types**: Numeric - -**Applicable table types**: standard tables and supertables - - -### SAMPLE - -```sql -SELECT SAMPLE(field_name, K) FROM { tb_name | stb_name } [WHERE clause] -``` - -**Description**: _k_ sampling values of a specific column. The applicable range of _k_ is [1,1000]. - -**Return value type**: Same as the column being operated plus the associated timestamp - -**Applicable data types**: Any data type except for tags of STable - -**Applicable nested query**: Inner query and Outer query - -**Applicable table types**: standard tables and supertables - -**More explanations**: - -This function cannot be used in expression calculation. 
-- Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline - - -### TAIL - -```sql -SELECT TAIL(field_name, k, offset_val) FROM {tb_name | stb_name} [WHERE clause]; -``` - -**Description**: The next _k_ rows are returned after skipping the last `offset_val` rows, NULL values are not ignored. `offset_val` is optional parameter. When it's not specified, the last _k_ rows are returned. When `offset_val` is used, the effect is same as `order by ts desc LIMIT k OFFSET offset_val`. - -**Parameter value range**: k: [1,100] offset_val: [0,100] - -**Return value type**:Same as the data type of the column being operated upon - -**Applicable data types**: Any data type except for timestamp, i.e. the primary key - -**Applicable table types**: standard tables and supertables - - -### TOP - -```sql -SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause]; -``` - -**Description**: The greatest _k_ values of a specific column in a table or STable. If a value has multiple occurrences in the column but counting all of them in will exceed the upper limit _k_, then a part of them will be returned randomly. - -**Return value type**:Same as the data type of the column being operated upon - -**Applicable data types**: Numeric - -**Applicable table types**: standard tables and supertables - -**More explanation**: - -- _k_ must be in range [1,100] -- The timestamp associated with the selected values are returned too -- Can't be used with `FILL` - -### UNIQUE - -```sql -SELECT UNIQUE(field_name) FROM {tb_name | stb_name} [WHERE clause]; -``` - -**Description**: The values that occur the first time in the specified column. The effect is similar to `distinct` keyword, but it can also be used to match tags or timestamp. The first occurrence of a timestamp or tag is used. 
- -**Return value type**:Same as the data type of the column being operated upon - -**Applicable column types**: Any data types except for timestamp - -**Applicable table types**: table, STable - - -## Time-Series Extensions - -TDengine includes extensions to standard SQL that are intended specifically for time-series use cases. The functions enabled by these extensions require complex queries to implement in general-purpose databases. By offering them as built-in extensions, TDengine reduces user workload. - -### CSUM - -```sql -SELECT CSUM(field_name) FROM { tb_name | stb_name } [WHERE clause] -``` - -**Description**: The cumulative sum of each row for a specific column. The number of output rows is same as that of the input rows. - -**Return value type**: Long integer for integers; Double for floating points. uint64_t for unsigned integers - -**Applicable data types**: Numeric - -**Nested query**: It can be used in both the outer query and inner query in a nested query. - -**Applicable table types**: standard tables and supertables - -**More explanations**: - -- Arithmetic operation can't be performed on the result of `csum` function -- Can only be used with aggregate functions This function can be used with supertables and standard tables. -- Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline - - -### DERIVATIVE - -```sql -SELECT DERIVATIVE(field_name, time_interval, ignore_negative) FROM tb_name [WHERE clause]; -``` - -**Description**: The derivative of a specific column. The time rage can be specified by parameter `time_interval`, the minimum allowed time range is 1 second (1s); the value of `ignore_negative` can be 0 or 1, 1 means negative values are ignored. - -**Return value type**: DOUBLE - -**Applicable data types**: Numeric - -**Applicable table types**: standard tables and supertables - -**More explanation**: - -- It can be used together with `PARTITION BY tbname` against a STable. 
-- It can be used together with a selected column. For example: select \_rowts, DERIVATIVE() from。 - -### DIFF - -```sql -SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHERE clause]; -``` - -**Description**: The different of each row with its previous row for a specific column. `ignore_negative` can be specified as 0 or 1, the default value is 1 if it's not specified. `1` means negative values are ignored. - -**Return value type**:Same as the data type of the column being operated upon - -**Applicable data types**: Numeric - -**Applicable table types**: standard tables and supertables - -**More explanation**: - -- The number of result rows is the number of rows subtracted by one, no output for the first row -- It can be used together with a selected column. For example: select \_rowts, DIFF() from。 - - -### IRATE - -```sql -SELECT IRATE(field_name) FROM tb_name WHERE clause; -``` - -**Description**: instantaneous rate on a specific column. The last two samples in the specified time range are used to calculate instantaneous rate. If the last sample value is smaller, then only the last sample value is used instead of the difference between the last two sample values. - -**Return value type**: DOUBLE - -**Applicable data types**: Numeric - -**Applicable table types**: standard tables and supertables - - -### MAVG - -```sql -SELECT MAVG(field_name, K) FROM { tb_name | stb_name } [WHERE clause] -``` - -**Description**: The moving average of continuous _k_ values of a specific column. If the number of input rows is less than _k_, nothing is returned. The applicable range of _k_ is [1,1000]. - -**Return value type**: DOUBLE - -**Applicable data types**: Numeric - -**Nested query**: It can be used in both the outer query and inner query in a nested query. - -**Applicable table types**: standard tables and supertables - -**More explanations**: - -- Arithmetic operation can't be performed on the result of `MAVG`. 
-- Can only be used with data columns, can't be used with tags. - Can't be used with aggregate functions. -- Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline - - -### STATECOUNT - -```sql -SELECT STATECOUNT(field_name, oper, val) FROM { tb_name | stb_name } [WHERE clause]; -``` - -**Description**: The number of continuous rows satisfying the specified conditions for a specific column. The result is shown as an extra column for each row. If the specified condition is evaluated as true, the number is increased by 1; otherwise the number is reset to -1. If the input value is NULL, then the corresponding row is skipped. - -**Applicable parameter values**: - -- oper : Can be one of `LT` (lower than), `GT` (greater than), `LE` (lower than or equal to), `GE` (greater than or equal to), `NE` (not equal to), `EQ` (equal to), the value is case insensitive -- val : Numeric types - -**Return value type**: Integer - -**Applicable data types**: Numeric - -**Applicable nested query**: Outer query only - -**Applicable table types**: standard tables and supertables - -**More explanations**: - -- Must be used together with `PARTITION BY tbname` when it's used on a STable to force the result into each single timeline] -- Can't be used with window operation, like interval/state_window/session_window - - -### STATEDURATION - -```sql -SELECT stateDuration(field_name, oper, val, unit) FROM { tb_name | stb_name } [WHERE clause]; -``` - -**Description**: The length of time range in which all rows satisfy the specified condition for a specific column. The result is shown as an extra column for each row. The length for the first row that satisfies the condition is 0. Next, if the condition is evaluated as true for a row, the time interval between current row and its previous row is added up to the time range; otherwise the time range length is reset to -1. If the value of the column is NULL, the corresponding row is skipped. 
- -**Applicable parameter values**: - -- oper : Can be one of `LT` (lower than), `GT` (greater than), `LE` (lower than or equal to), `GE` (greater than or equal to), `NE` (not equal to), `EQ` (equal to), the value is case insensitive -- val : Numeric types -- unit: The unit of time interval. Enter one of the following options: 1b (nanoseconds), 1u (microseconds), 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), or 1w (weeks) If you do not enter a unit of time, the precision of the current database is used by default. - -**Return value type**: Integer - -**Applicable data types**: Numeric - -**Applicable nested query**: Outer query only - -**Applicable table types**: standard tables and supertables - -**More explanations**: - -- Must be used together with `PARTITION BY tbname` when it's used on a STable to force the result into each single timeline] -- Can't be used with window operation, like interval/state_window/session_window - - -### TWA - -```sql -SELECT TWA(field_name) FROM tb_name WHERE clause; -``` - -**Description**: Time weighted average on a specific column within a time range - -**Return value type**: DOUBLE - -**Applicable data types**: Numeric - -**Applicable table types**: standard tables and supertables - -- Must be used together with `PARTITION BY tbname` to force the result into each single timeline. - - -## System Information Functions - -### DATABASE - -```sql -SELECT DATABASE(); -``` - -**Description**: The current database. If no database is specified upon logging in and no database is specified with `USE` after login, NULL will be returned by `select database()`. - - -### CLIENT_VERSION - -```sql -SELECT CLIENT_VERSION(); -``` - -**Description**: The client version. - -### SERVER_VERSION - -```sql -SELECT SERVER_VERSION(); -``` - -**Description**: The server version. - -### SERVER_STATUS - -```sql -SELECT SERVER_STATUS(); -``` - -**Description**: The server status. 
diff --git a/docs/en/17-taos-sql/12-distinguished.md b/docs/en/17-taos-sql/12-distinguished.md deleted file mode 100644 index 17c7f0c445..0000000000 --- a/docs/en/17-taos-sql/12-distinguished.md +++ /dev/null @@ -1,156 +0,0 @@ ---- -sidebar_label: Time-Series Extensions -title: Time-Series Extensions -description: TimeSeries Data Specific Queries. ---- - -As a purpose-built database for storing and processing time-series data, TDengine provides time-series-specific extensions to standard SQL. - -These extensions include tag-partitioned queries and windowed queries. - -## Tag-Partitioned Queries - -When you query a supertable, you may need to partition the supertable by tag and perform additional operations on a specific partition. In this case, you can use the following SQL clause: - -```sql -PARTITION BY part_list -``` - -part_list can be any scalar expression, such as a column, constant, scalar function, or a combination of the preceding items. - -A PARTITION BY clause with a tag is processed as follows: - -- The PARTITION BY clause must occur after the WHERE clause and cannot be used with a JOIN clause. -- The PARTITION BY clause partitions the super table by the specified tag group, and the specified calculation is performed on each partition. The calculation performed is determined by the rest of the statement - a window clause, GROUP BY clause, or SELECT clause. -- You can use PARTITION BY together with a window clause or GROUP BY clause. In this case, the window or GROUP BY clause takes effect on every partition. For example, the following statement partitions the table by the location tag, performs downsampling over a 10 minute window, and returns the maximum value: - -```sql -select max(current) from meters partition by location interval(10m) -``` - -## Windowed Queries - -Aggregation by time window is supported in TDengine. 
For example, in the case where temperature sensors report the temperature every seconds, the average temperature for every 10 minutes can be retrieved by performing a query with a time window. Window related clauses are used to divide the data set to be queried into subsets and then aggregation is performed across the subsets. There are three kinds of windows: time window, status window, and session window. There are two kinds of time windows: sliding window and flip time/tumbling window. The query syntax is as follows: - -```sql -SELECT function_list FROM tb_name - [WHERE where_condition] - [SESSION(ts_col, tol_val)] - [STATE_WINDOW(col)] - [INTERVAL(interval [, offset]) [SLIDING sliding]] - [FILL({NONE | VALUE | PREV | NULL | LINEAR | NEXT})] -``` - -The following restrictions apply: - -### Restricted Functions - -- Aggregate functions and select functions can be used in `function_list`, with each function having only one output. For example COUNT, AVG, SUM, STDDEV, LEASTSQUARES, PERCENTILE, MIN, MAX, FIRST, LAST. Functions having multiple outputs, such as DIFF or arithmetic operations can't be used. -- `LAST_ROW` can't be used together with window aggregate. -- Scalar functions, like CEIL/FLOOR, can't be used with window aggregate. - -### Other Rules - -- The window clause must occur after the PARTITION BY clause and before the GROUP BY clause. It cannot be used with a GROUP BY clause. -- SELECT clauses on windows can contain only the following expressions: - - Constants - - Aggregate functions - - Expressions that include the preceding expressions. -- The window clause cannot be used with a GROUP BY clause. -- `WHERE` clause can be used to specify the starting and ending time and other filter conditions - - -### Window Pseudocolumns - -**\_WSTART, \_WEND, and \_WDURATION** - -The \_WSTART, \_WEND, and \_WDURATION pseudocolumns indicate the beginning, end, and duration of a window. - -These pseudocolumns occur after the aggregation clause. 
- -### FILL Clause - -`FILL` clause is used to specify how to fill when there is data missing in any window, including: - -1. NONE: No fill (the default fill mode) -2. VALUE: Fill with a fixed value, which should be specified together, for example `FILL(VALUE, 1.23)` Note: The value filled depends on the data type. For example, if you run FILL(VALUE 1.23) on an integer column, the value 1 is filled. -3. PREV: Fill with the previous non-NULL value, `FILL(PREV)` -4. NULL: Fill with NULL, `FILL(NULL)` -5. LINEAR: Fill with the closest non-NULL value, `FILL(LINEAR)` -6. NEXT: Fill with the next non-NULL value, `FILL(NEXT)` - -:::info - -1. A huge volume of interpolation output may be returned using `FILL`, so it's recommended to specify the time range when using `FILL`. The maximum number of interpolation values that can be returned in a single query is 10,000,000. -2. The result set is in ascending order of timestamp when you aggregate by time window. -3. If aggregate by window is used on STable, the aggregate function is performed on all the rows matching the filter conditions. If `GROUP BY` is not used in the query, the result set will be returned in ascending order of timestamp; otherwise the result set is not exactly in the order of ascending timestamp in each group. - -::: - -### Time Window - -There are two kinds of time windows: sliding window and flip time/tumbling window. - -The `INTERVAL` clause is used to generate time windows of the same time interval. The `SLIDING` parameter is used to specify the time step for which the time window moves forward. The query is performed on one time window each time, and the time window moves forward with time. When defining a continuous query, both the size of the time window and the step of forward sliding time need to be specified. As shown in the figure below, [t0s, t0e], [t1s, t1e], [t2s, t2e] are respectively the time ranges of three time windows on which continuous queries are executed. 
The time step for which time window moves forward is marked by `sliding time`. Query, filter and aggregate operations are executed on each time window respectively. When the time step specified by `SLIDING` is same as the time interval specified by `INTERVAL`, the sliding time window is actually a flip time/tumbling window. - -![TDengine Database Time Window](./timewindow-1.webp) - -`INTERVAL` and `SLIDING` should be used with aggregate functions and select functions. The SQL statement below is illegal because no aggregate or selection function is used with `INTERVAL`. - -``` -SELECT * FROM temp_tb_1 INTERVAL(1m); -``` - -The time step specified by `SLIDING` cannot exceed the time interval specified by `INTERVAL`. The SQL statement below is illegal because the time length specified by `SLIDING` exceeds that specified by `INTERVAL`. - -``` -SELECT COUNT(*) FROM temp_tb_1 INTERVAL(1m) SLIDING(2m); -``` - -When using time windows, note the following: - -- The window length for aggregation depends on the value of INTERVAL. The minimum interval is 10 ms. You can configure a window as an offset from UTC 0:00. The offset cannot be smaller than the interval. You can use SLIDING to specify the length of time that the window moves forward. -Please note that the `timezone` parameter should be configured to be the same value in the `taos.cfg` configuration file on client side and server side. -- The result set is in ascending order of timestamp when you aggregate by time window. - -### Status Window - -In case of using integer, bool, or string to represent the status of a device at any given moment, continuous rows with the same status belong to a status window. Once the status changes, the status window closes. As shown in the following figure, there are two status windows according to status, [2019-04-28 14:22:07,2019-04-28 14:22:10] and [2019-04-28 14:22:11,2019-04-28 14:22:12]. Status window is not applicable to STable for now. 
- -![TDengine Database Status Window](./timewindow-3.webp) - -`STATE_WINDOW` is used to specify the column on which the status window will be based. For example: - -``` -SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status); -``` - -### Session Window - -The primary key, i.e. timestamp, is used to determine which session window a row belongs to. As shown in the figure below, if the limit of time interval for the session window is specified as 12 seconds, then the 6 rows in the figure constitute 2 time windows, [2019-04-28 14:22:10,2019-04-28 14:22:30] and [2019-04-28 14:23:10,2019-04-28 14:23:30] because the time difference between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the time interval limit of 12 seconds. - -![TDengine Database Session Window](./timewindow-2.webp) - -If the time interval between two continuous rows is within the time interval specified by `tol_value`, they belong to the same session window; otherwise a new session window is started automatically. Session window is not supported on STable for now. - -``` - -SELECT COUNT(*), FIRST(ts) FROM temp_tb_1 SESSION(ts, tol_val); -``` - -### Examples - -A table of intelligent meters can be created by the SQL statement below: - -``` -CREATE TABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT); -``` - -The average current, maximum current and median of current in every 10 minutes for the past 24 hours can be calculated using the SQL statement below, with missing values filled with the previous non-NULL values. 
The query statement is as follows: - -``` -SELECT AVG(current), MAX(current), APERCENTILE(current, 50) FROM meters - WHERE ts>=NOW-1d and ts<=now - INTERVAL(10m) - FILL(PREV); -``` diff --git a/docs/en/17-taos-sql/13-tmq.md b/docs/en/17-taos-sql/13-tmq.md deleted file mode 100644 index 33122e770e..0000000000 --- a/docs/en/17-taos-sql/13-tmq.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -sidebar_label: Data Subscription -title: Data Subscription -description: Subscribe Data from TDengine. ---- - -The information in this document is related to the TDengine data subscription feature. - -## Create a Topic - -```sql -CREATE TOPIC [IF NOT EXISTS] topic_name AS subquery; -``` - - -You can use filtering, scalar functions, and user-defined scalar functions with a topic. JOIN, GROUP BY, windows, aggregate functions, and user-defined aggregate functions are not supported. The following rules apply to subscribing to a column: - -1. The returned field is determined when the topic is created. -2. Columns to which a consumer is subscribed or that are involved in calculations cannot be deleted or modified. -3. If you add a column, the new column will not appear in the results for the subscription. -4. If you run `SELECT \*`, all columns in the subscription at the time of its creation are displayed. This includes columns in supertables, standard tables, and subtables. Supertables are shown as data columns plus tag columns. - - -## Delete a Topic - -```sql -DROP TOPIC [IF EXISTS] topic_name; -``` - -If a consumer is subscribed to the topic that you delete, the consumer will receive an error. - -## View Topics - -## SHOW TOPICS - -```sql -SHOW TOPICS; -``` - -The preceding command displays all topics in the current database. - -## Create Consumer Group - -You can create consumer groups only through the TDengine Client driver or the API provided by a connector. 
- -## Delete Consumer Group - -```sql -DROP CONSUMER GROUP [IF EXISTS] cgroup_name ON topic_name; -``` - -This deletes the cgroup_name in the topic_name. - -## View Consumer Groups - -```sql -SHOW CONSUMERS; -``` - -The preceding command displays all consumer groups in the current database. diff --git a/docs/en/17-taos-sql/14-stream.md b/docs/en/17-taos-sql/14-stream.md deleted file mode 100644 index d26adc9c7f..0000000000 --- a/docs/en/17-taos-sql/14-stream.md +++ /dev/null @@ -1,100 +0,0 @@ ---- -sidebar_label: Stream Processing -title: Stream Processing -description: Built-in Stream Processing. ---- - -Raw time-series data is often cleaned and preprocessed before being permanently stored in a database. Stream processing components like Kafka, Flink, and Spark are often deployed alongside a time-series database to handle these operations, increasing system complexity and maintenance costs. - -Because stream processing is built in to TDengine, you are no longer reliant on middleware. TDengine offers a unified platform for writing, preprocessing, permanent storage, complex analysis, and real-time computation and alerting. Additionally, you can use SQL to perform all these tasks. - -## Create a Stream - -```sql -CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name AS subquery -stream_options: { - TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time] - WATERMARK time -} - -``` - -The subquery is a subset of standard SELECT query syntax: - -```sql -subquery: SELECT [DISTINCT] select_list - from_clause - [WHERE condition] - [PARTITION BY tag_list] - [window_clause] -``` - -Session windows, state windows, and sliding windows are supported. When you configure a session or state window for a supertable, you must use PARTITION BY TBNAME. 
- -```sql -window_clause: { - SESSION(ts_col, tol_val) - | STATE_WINDOW(col) - | INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] -} -``` - -`SESSION` indicates a session window, and `tol_val` indicates the maximum range of the time interval. If the time interval between two continuous rows are within the time interval specified by `tol_val` they belong to the same session window; otherwise a new session window is started automatically. - -For example, the following SQL statement creates a stream and automatically creates a supertable named `avg_vol`. The stream has a 1 minute time window that slides forward in 30 second intervals to calculate the average voltage of the meters supertable. - -```sql -CREATE STREAM avg_vol_s INTO avg_vol AS -SELECT _wstartts, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVAL(1m) SLIDING(30s); -``` - -## Delete a Stream - -```sql -DROP STREAM [IF NOT EXISTS] stream_name -``` - -This statement deletes the stream processing service only. The data generated by the stream is retained. - -## View Streams - -```sql -SHOW STREAMS; -``` - -## Trigger Stream Processing - -When you create a stream, you can use the TRIGGER parameter to specify triggering conditions for it. - -For non-windowed processing, triggering occurs in real time. For windowed processing, there are three methods of triggering: - -1. AT_ONCE: triggers on write - -2. WINDOW_CLOSE: triggers when the window closes. This is determined by the event time. You can use WINDOW_CLOSE together with `watermark`. For more information, see Stream Processing Strategy for Out-of-Order Data. - -3. MAX_DELAY: triggers when the window closes. If the window has not closed but the time elapsed exceeds MAX_DELAY, stream processing is also triggered. - -Because the window closing is determined by the event time, a delay or termination of an event stream will prevent the event time from being updated. This may result in an inability to obtain the latest results. 
- -For this reason, MAX_DELAY is provided as a way to ensure that processing occurs even if the window does not close. - -MAX_DELAY also triggers when the window closes. Additionally, if a write occurs but the processing is not triggered before MAX_DELAY expires, processing is also triggered. - -## Stream Processing Strategy for Out-of-Order Data - -When you create a stream, you can specify a watermark in the `stream_option` parameter. - -The watermark is used to specify the tolerance for out-of-order data. The default value is 0. - -T = latest event time - watermark - -The window closing time for each batch of data that arrives at the system is updated using the preceding formula, and all windows are closed whose closing time is less than T. If the triggering method is WINDOW_CLOSE or MAX_DELAY, the aggregate result for the window is pushed. - -Stream processing strategy for expired data -The data in expired windows is tagged as expired. TDengine stream processing provides two methods for handling such data: - -1. Drop the data. This is the default and often only handling method for most stream processing engines. - -2. Recalculate the data. In this method, all data in the window is reobtained from the database and recalculated. The latest results are then returned. - -In both of these methods, configuring the watermark is essential for obtaining accurate results (if expired data is dropped) and avoiding repeated triggers that affect system performance (if expired data is recalculated). 
diff --git a/docs/en/17-taos-sql/16-operators.md b/docs/en/17-taos-sql/16-operators.md deleted file mode 100644 index 8dd1cef5ca..0000000000 --- a/docs/en/17-taos-sql/16-operators.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -sidebar_label: Operators -title: Operators -description: TDengine Supported Operators ---- - -## Arithmetic Operators - -| # | **Operator** | **Supported Data Types** | **Description** | -| --- | :--------: | -------------- | -------------------------- | -| 1 | +, - | Numeric | Expresses sign. Unary operators. | -| 2 | +, - | Numeric | Expresses addition and subtraction. Binary operators. | -| 3 | \*, / | Numeric | Expresses multiplication and division. Binary operators. | -| 4 | % | Numeric | Expresses modulo. Binary operator. | - -## Bitwise Operators - -| # | **Operator** | **Supported Data Types** | **Description** | -| --- | :--------: | -------------- | ------------------ | -| 1 | & | Numeric | Bitwise AND. Binary operator. | -| 2 | \| | Numeric | Bitwise OR. Binary operator. | - -## JSON Operators - -The `->` operator returns the value for a key in JSON column. Specify the column indicator on the left of the operator and the key name on the right of the operator. For example, `col->name` returns the value of the name key. - -## Set Operators - -Set operators combine the results of two queries. Queries that include set operators are known as compound queries. The expressions corresponding to each query in the select list in a compound query must match in number. The results returned take the data type of the first query, and the data type returned by subsequent queries must be convertible into the data type of the first query. The conditions of the `CAST` function apply to this conversion. - -TDengine supports the `UNION` and `UNION ALL` operations. UNION ALL collects all query results and returns them as a composite result without deduplication. UNION collects all query results and returns them as a deduplicated composite result. 
In a single SQL statement, at most 100 set operators can be supported. - -## Comparison Operators - -| # | **Operator** | **Supported Data Types** | **Description** | -| --- | :---------------: | -------------------------------------------------------------------- | -------------------- | -| 1 | = | All types except BLOB, MEDIUMBLOB, and JSON | Equal to | -| 2 | <\>, != | All types except BLOB, MEDIUMBLOB, and JSON; the primary key (timestamp) is also not supported | Not equal to | -| 3 | \>, < | All types except BLOB, MEDIUMBLOB, and JSON | Greater than and less than | -| 4 | \>=, <= | All types except BLOB, MEDIUMBLOB, and JSON | Greater than or equal to and less than or equal to | -| 5 | IS [NOT] NULL | All types | Indicates whether the value is null | -| 6 | [NOT] BETWEEN AND | All types except BLOB, MEDIUMBLOB, and JSON | Closed interval comparison | -| 7 | IN | All types except BLOB, MEDIUMBLOB, and JSON; the primary key (timestamp) is also not supported | Equal to any value in the list | -| 8 | LIKE | BINARY, NCHAR, and VARCHAR | Wildcard match | -| 9 | MATCH, NMATCH | BINARY, NCHAR, and VARCHAR | Regular expression match | -| 10 | CONTAINS | JSON | Indicates whether the key exists | - -LIKE is used together with wildcards to match strings. Its usage is described as follows: - -- '%' matches 0 or any number of characters, '\_' matches any single ASCII character. -- `\_` is used to match the \_ in the string. -- The maximum length of wildcard string is 100 bytes. A very long wildcard string may slowdown the execution performance of `LIKE` operator. - -MATCH and NMATCH are used together with regular expressions to match strings. Their usage is described as follows: - -- Use POSIX regular expression syntax. For more information, see Regular Expressions. -- Regular expression can be used against only table names, i.e. `tbname`, and tags of binary/nchar types, but can't be used against data columns. -- The maximum length of regular expression string is 128 bytes. 
Configuration parameter `maxRegexStringLen` can be used to set the maximum allowed regular expression. It's a configuration parameter on the client side, and will take effect after restarting the client. - -## Logical Operators - -| # | **Operator** | **Supported Data Types** | **Description** | -| --- | :--------: | -------------- | --------------------------------------------------------------------------- | -| 1 | AND | BOOL | Logical AND; if both conditions are true, TRUE is returned; If either condition is false, FALSE is returned. -| 2 | OR | BOOL | Logical OR; if either condition is true, TRUE is returned; If both conditions are false, FALSE is returned. - -TDengine performs short-path optimization when calculating logical conditions. If the first condition for AND is false, FALSE is returned without calculating the second condition. If the first condition for OR is true, TRUE is returned without calculating the second condition diff --git a/docs/en/17-taos-sql/17-json.md b/docs/en/17-taos-sql/17-json.md deleted file mode 100644 index 5ea611058e..0000000000 --- a/docs/en/17-taos-sql/17-json.md +++ /dev/null @@ -1,92 +0,0 @@ ---- -sidebar_label: JSON Type -title: JSON Type -description: JSON Data Type ---- - - -## Syntax - -1. Tag of type JSON - - ``` - create stable s1 (ts timestamp, v1 int) tags (info json) - - create table s1_1 using s1 tags ('{"k1": "v1"}') - ``` - -2. "->" Operator of JSON - - ``` - select * from s1 where info->'k1' = 'v1' - - select info->'k1' from s1 - ``` - -3. "contains" Operator of JSON - - ``` - select * from s1 where info contains 'k2' - - select * from s1 where info contains 'k1' - ``` - -## Applicable Operations - -1. When a JSON data type is used in `where`, `match/nmatch/between and/like/and/or/is null/is no null` can be used but `in` can't be used. 
- - ``` - select * from s1 where info->'k1' match 'v*'; - - select * from s1 where info->'k1' like 'v%' and info contains 'k2'; - - select * from s1 where info is null; - - select * from s1 where info->'k1' is not null - ``` - -2. A tag of JSON type can be used in `group by`, `order by`, `join`, `union all` and sub query; for example `group by json->'key'` - -3. `Distinct` can be used with a tag of type JSON - - ``` - select distinct info->'k1' from s1 - ``` - -4. Tag Operations - - The value of a JSON tag can be altered. Please note that the full JSON will be overridden when doing this. - - The name of a JSON tag can be altered. - - A tag of JSON type can't be added or removed. The column length of a JSON tag can't be changed. - -## Other Restrictions - -- JSON type can only be used for a tag. There can be only one tag of JSON type, and it's exclusive to any other types of tags. - -- The maximum length of keys in JSON is 256 bytes, and key must be printable ASCII characters. The maximum total length of a JSON is 4,096 bytes. - -- JSON format: - - - The input string for JSON can be empty, i.e. "", "\t", or NULL, but it can't be non-NULL string, bool or array. - - object can be {}, and the entire JSON is empty if so. Key can be "", and it's ignored if so. - - value can be int, double, string, bool or NULL, and it can't be an array. Nesting is not allowed which means that the value of a key can't be JSON. - - If one key occurs twice in JSON, only the first one is valid. - - Escape characters are not allowed in JSON. - -- NULL is returned when querying a key that doesn't exist in JSON. - -- If a tag of JSON is the result of inner query, it can't be parsed and queried in the outer query. - - For example, the SQL statements below are not supported. 
- - ``` - select jtag->'key' from (select jtag from stable) - ``` - - and - - ``` - select jtag->'key' from (select jtag from stable) where jtag->'key'>0 - ``` diff --git a/docs/en/17-taos-sql/18-escape.md b/docs/en/17-taos-sql/18-escape.md deleted file mode 100644 index 872397b29a..0000000000 --- a/docs/en/17-taos-sql/18-escape.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: Escape Characters -description: How to use Escape ---- - -## Escape Characters - -| Escape Character | **Actual Meaning** | -| :--------------: | ------------------------ | -| `\'` | Single quote ' | -| `\"` | Double quote " | -| \n | Line Break | -| \r | Carriage Return | -| \t | tab | -| `\\` | Back Slash \ | -| `\%` | % see below for details | -| `\_` | \_ see below for details | - -## Restrictions - -1. If there are escape characters in identifiers (database name, table name, column name) - - Identifier without ``: Error will be returned because identifier must be constituted of digits, ASCII characters or underscore and can't be started with digits - - Identifier quoted with ``: Original content is kept, no escaping -2. If there are escape characters in values - - The escape characters will be escaped as the above table. If the escape character doesn't match any supported one, the escape character "\" will be ignored. - - "%" and "\_" are used as wildcards in `like`. `\%` and `\_` should be used to represent literal "%" and "\_" in `like`,. If `\%` and `\_` are used out of `like` context, the evaluation result is "`\%`"and "`\_`", instead of "%" and "\_". diff --git a/docs/en/17-taos-sql/19-limit.md b/docs/en/17-taos-sql/19-limit.md deleted file mode 100644 index b63cf469b8..0000000000 --- a/docs/en/17-taos-sql/19-limit.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -sidebar_label: Limits -title: Limits -description: Naming Limits ---- - -## Naming Rules - -1. Names can include letters, digits, and underscores (_). -2. Names can begin with letters or underscores (_) but not with digits. -3. 
Names are not case-sensitive. -4. Rules for names with escape characters are as follows: - You can escape a name by enclosing it in backticks (`). In this way, you can reuse keyword names for table names. However, the first three naming rules no longer apply. - Table and column names that are enclosed in escape characters are still subject to length limits. When the length of such a name is calculated, the escape characters are not included. Names specified using escape character are case-sensitive. - - For example, \`aBc\` and \`abc\` are different table or column names, but "abc" and "aBc" are same names because internally they are all "abc". - Only ASCII visible characters can be used with escape character. - -## Password Rules - -`[a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/]` - -The following characters cannot occur in a password: single quotation marks ('), double quotation marks ("), backticks (`), backslashes (\\), and spaces. - -## General Limits - -- Maximum length of database name is 32 bytes -- Maximum length of table name is 192 bytes, excluding the database name prefix and the separator. -- Maximum length of each data row is 48K bytes. Note that the upper limit includes the extra 2 bytes consumed by each column of BINARY/NCHAR type. -- The maximum length of a column name is 64 bytes. -- Maximum number of columns is 4096. There must be at least 2 columns, and the first column must be timestamp. -- The maximum length of a tag name is 64 bytes -- Maximum number of tags is 128. There must be at least 1 tag. The total length of tag values cannot exceed 16 KB. -- Maximum length of single SQL statement is 1 MB (1048576 bytes). It can be configured in the parameter `maxSQLLength` in the client side, the applicable range is [65480, 1048576]. -- At most 4096 columns can be returned by `SELECT`. Functions in the query statement constitute columns. An error is returned if the limit is exceeded. 
-- Maximum numbers of databases, STables, tables are dependent only on the system resources. -- The number of replicas can only be 1 or 3. -- The maximum length of a username is 23 bytes. -- The maximum length of a password is 15 bytes. -- The maximum number of rows depends on system resources. -- The maximum number of vnodes in a database is 1024. - -## Restrictions of Table/Column Names - -### Name Restrictions of Table/Column - -The name of a table or column can only be composed of ASCII characters, digits and underscore and it cannot start with a digit. The maximum length is 192 bytes. Names are case insensitive. The name mentioned in this rule doesn't include the database name prefix and the separator. - -### Name Restrictions After Escaping - -To support more flexible table or column names, new escape character "\`" is introduced in TDengine to avoid the conflict between table name and keywords and break the above restrictions for table names. The escape character is not counted in the length of table name. -With escaping, the string inside escape characters are case sensitive, i.e. will not be converted to lower case internally. The table names specified using escape character are case sensitive. - -For example: -\`aBc\` and \`abc\` are different table or column names, but "abc" and "aBc" are same names because internally they are all "abc". - -:::note -The characters inside escape characters must be printable characters. - -::: diff --git a/docs/en/17-taos-sql/20-keywords.md b/docs/en/17-taos-sql/20-keywords.md deleted file mode 100644 index d29ed5d018..0000000000 --- a/docs/en/17-taos-sql/20-keywords.md +++ /dev/null @@ -1,277 +0,0 @@ ---- -sidebar_label: Keywords -title: Reserved Keywords -description: Reserved Keywords in TDengine SQL ---- - -## Keyword List - -There are about 200 keywords reserved by TDengine, they can't be used as the name of database, STable or table with either upper case, lower case or mixed case. 
The following list shows all reserved keywords: - -### A - -- ABORT -- ACCOUNT -- ACCOUNTS -- ADD -- AFTER -- ALL -- ALTER -- AND -- AS -- ASC -- ATTACH - -### B - -- BEFORE -- BEGIN -- BETWEEN -- BIGINT -- BINARY -- BITAND -- BITNOT -- BITOR -- BLOCKS -- BOOL -- BY - -### C - -- CACHE -- CACHELAST -- CASCADE -- CHANGE -- CLUSTER -- COLON -- COLUMN -- COMMA -- COMP -- COMPACT -- CONCAT -- CONFLICT -- CONNECTION -- CONNECTIONS -- CONNS -- COPY -- CREATE -- CTIME - -### D - -- DATABASE -- DATABASES -- DAYS -- DBS -- DEFERRED -- DELETE -- DELIMITERS -- DESC -- DESCRIBE -- DETACH -- DISTINCT -- DIVIDE -- DNODE -- DNODES -- DOT -- DOUBLE -- DROP - -### E - -- END -- EQ -- EXISTS -- EXPLAIN - -### F - -- FAIL -- FILE -- FILL -- FLOAT -- FOR -- FROM -- FSYNC - -### G - -- GE -- GLOB -- GRANTS -- GROUP -- GT - -### H - -- HAVING - -### I - -- ID -- IF -- IGNORE -- IMMEDIA -- IMPORT -- IN -- INITIAL -- INSERT -- INSTEAD -- INT -- INTEGER -- INTERVA -- INTO -- IS -- ISNULL - -### J - -- JOIN - -### K - -- KEEP -- KEY -- KILL - -### L - -- LE -- LIKE -- LIMIT -- LINEAR -- LOCAL -- LP -- LSHIFT -- LT - -### M - -- MATCH -- MAXROWS -- MINROWS -- MINUS -- MNODES -- MODIFY -- MODULES - -### N - -- NE -- NONE -- NOT -- NOTNULL -- NOW -- NULL - -### O - -- OF -- OFFSET -- OR -- ORDER - -### P - -- PARTITION -- PASS -- PLUS -- PPS -- PRECISION -- PREV -- PRIVILEGE - -### Q - -- QTIME -- QUERIE -- QUERY -- QUORUM - -### R - -- RAISE -- REM -- REPLACE -- REPLICA -- RESET -- RESTRIC -- ROW -- RP -- RSHIFT - -### S - -- SCORES -- SELECT -- SEMI -- SESSION -- SET -- SHOW -- SLASH -- SLIDING -- SLIMIT -- SMALLIN -- SOFFSET -- STable -- STableS -- STAR -- STATE -- STATEMENT -- STATE_WI -- STORAGE -- STREAM -- STREAMS -- STRING -- SYNCDB - -### T - -- TABLE -- TABLES -- TAG -- TAGS -- TBNAME -- TIMES -- TIMESTAMP -- TINYINT -- TOPIC -- TOPICS -- TRIGGER -- TSERIES - -### U - -- UMINUS -- UNION -- UNSIGNED -- UPDATE -- UPLUS -- USE -- USER -- USERS -- USING - -### V - -- VALUES -- VARIABLE 
-- VARIABLES -- VGROUPS -- VIEW -- VNODES - -### W - -- WAL -- WHERE - -### \_ - -- \_C0 -- \_QSTART -- \_QSTOP -- \_QDURATION -- \_WSTART -- \_WSTOP -- \_WDURATION diff --git a/docs/en/17-taos-sql/26-udf.md b/docs/en/17-taos-sql/26-udf.md deleted file mode 100644 index 71603c8804..0000000000 --- a/docs/en/17-taos-sql/26-udf.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -sidebar_label: UDF -title: User-Defined Functions (UDF) -description: User Defined Functions ---- - -You can create user-defined functions and import them into TDengine. -## Create UDF - -SQL command can be executed on the host where the generated UDF DLL resides to load the UDF DLL into TDengine. This operation cannot be done through REST interface or web console. Once created, any client of the current TDengine can use these UDF functions in their SQL commands. UDF are stored in the management node of TDengine. The UDFs loaded in TDengine would be still available after TDengine is restarted. - -When creating UDF, the type of UDF, i.e. a scalar function or aggregate function must be specified. If the specified type is wrong, the SQL statements using the function would fail with errors. The input data type and output data type must be consistent with the UDF definition. - -- Create Scalar Function -```sql -CREATE FUNCTION function_name AS library_path OUTPUTTYPE output_type; -``` - - - function_name: The scalar function name to be used in SQL statement which must be consistent with the UDF name and is also the name of the compiled DLL (.so file). - - library_path: The absolute path of the DLL file including the name of the shared object file (.so). The path must be quoted with single or double quotes. - - output_type: The data type of the results of the UDF. - - For example, the following SQL statement can be used to create a UDF from `libbitand.so`. 
- - ```sql - CREATE FUNCTION bit_and AS "/home/taos/udf_example/libbitand.so" OUTPUTTYPE INT; - ``` - -- Create Aggregate Function -```sql -CREATE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type [ BUFSIZE buffer_size ]; -``` - - - function_name: The aggregate function name to be used in SQL statement which must be consistent with the udfNormalFunc name and is also the name of the compiled DLL (.so file). - - library_path: The absolute path of the DLL file including the name of the shared object file (.so). The path must be quoted with single or double quotes. - - output_type: The output data type, the value is the literal string of the supported TDengine data type. - - buffer_size: The size of the intermediate buffer in bytes. This parameter is optional. - - For example, the following SQL statement can be used to create a UDF from `libl2norm.so`. - - ```sql - CREATE AGGREGATE FUNCTION l2norm AS "/home/taos/udf_example/libl2norm.so" OUTPUTTYPE DOUBLE bufsize 8; - ``` -## Manage UDF - -- The following statement deleted the specified user-defined function. -``` -DROP FUNCTION function_name; -``` - -- function_name: The value of function_name in the CREATE statement used to import the UDF for example `bit_and` or `l2norm`. -```sql -DROP FUNCTION bit_and; -``` -- Show Available UDF -```sql -SHOW FUNCTIONS; -``` - -## Call UDF - -The function name specified when creating UDF can be used directly in SQL statements, just like builtin functions. For example: -```sql -SELECT X(c1,c2) FROM table/stable; -``` - -The above SQL statement invokes function X for column c1 and c2. You can use query keywords like WHERE with user-defined functions. diff --git a/docs/en/17-taos-sql/27-index.md b/docs/en/17-taos-sql/27-index.md deleted file mode 100644 index 7215c26f6a..0000000000 --- a/docs/en/17-taos-sql/27-index.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -sidebar_label: Index -title: Using Indices -description: Use Index to Accelerate Query. 
---- - -TDengine supports SMA and FULLTEXT indexing. - -## Create an Index - -```sql -CREATE FULLTEXT INDEX index_name ON tb_name (col_name [, col_name] ...) - -CREATE SMA INDEX index_name ON tb_name index_option - -index_option: - FUNCTION(functions) INTERVAL(interval_val [, interval_offset]) [SLIDING(sliding_val)] [WATERMARK(watermark_val)] [MAX_DELAY(max_delay_val)] - -functions: - function [, function] ... -``` - -### SMA Indexing - -Performs pre-aggregation on the specified column over the time window defined by the INTERVAL clause. The type is specified in functions_string. SMA indexing improves aggregate query performance for the specified time period. One supertable can only contain one SMA index. - -- The max, min, and sum functions are supported. -- WATERMARK: Enter a value between 0ms and 900000ms. The most precise unit supported is milliseconds. The default value is 5 seconds. This option can be used only on supertables. -- MAX_DELAY: Enter a value between 1ms and 900000ms. The most precise unit supported is milliseconds. The default value is the value of interval provided that it does not exceed 900000ms. This option can be used only on supertables. Note: Retain the default value if possible. Configuring a small MAX_DELAY may cause results to be frequently pushed, affecting storage and query performance. - -### FULLTEXT Indexing - -Creates a text index for the specified column. FULLTEXT indexing improves performance for queries with text filtering. The index_option syntax is not supported for FULLTEXT indexing. FULLTEXT indexing is supported for JSON tag columns only. Multiple columns cannot be indexed together. However, separate indices can be created for each column. - -## Delete an Index - -```sql -DROP INDEX index_name; -``` - -## View Indices - -````sql -```sql -SHOW INDEXES FROM tbl_name [FROM db_name]; -```` - -Shows indices that have been created for the specified database or table. 
diff --git a/docs/en/17-taos-sql/_category_.yml b/docs/en/17-taos-sql/_category_.yml deleted file mode 100644 index 74a3b6309e..0000000000 --- a/docs/en/17-taos-sql/_category_.yml +++ /dev/null @@ -1 +0,0 @@ -label: TDengine SQL diff --git a/docs/en/17-taos-sql/index.md b/docs/en/17-taos-sql/index.md deleted file mode 100644 index e389f7f59c..0000000000 --- a/docs/en/17-taos-sql/index.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -sidebar_label: SQL Reference -title: TDengine SQL Reference -description: Full reference manual of TDengine SQL. ---- - -This section explains the syntax of SQL to perform operations on databases, tables and STables, insert data, select data and use functions. We also provide some tips that can be used in TDengine SQL. If you have previous experience with SQL this section will be fairly easy to understand. If you do not have previous experience with SQL, you'll come to appreciate the simplicity and power of SQL. TDengine SQL has been enhanced in version 3.0, and the query engine has been rearchitected. - -TDengine SQL is the major interface for users to write data into or query from TDengine. It uses standard SQL syntax and includes extensions and optimizations for time-series data and services. The maximum length of a TDengine SQL statement is 1 MB. Note that keyword abbreviations are not supported. For example, DELETE cannot be entered as DEL. - -Syntax Specifications used in this chapter: - -- Keywords are given in uppercase, although SQL is not case-sensitive. -- Information that you input is given in lowercase. -- \[ \] means optional input, excluding [] itself. -- | means one of a few options, excluding | itself. -- … means the item prior to it can be repeated multiple times. - -To better demonstrate the syntax, usage and rules of TAOS SQL, hereinafter it's assumed that there is a data set of data from electric meters. Each meter collects 3 data measurements: current, voltage, phase. 
The data model is shown below: - -``` -taos> DESCRIBE meters; - Field | Type | Length | Note | -================================================================================= - ts | TIMESTAMP | 8 | | - current | FLOAT | 4 | | - voltage | INT | 4 | | - phase | FLOAT | 4 | | - location | BINARY | 64 | TAG | - groupid | INT | 4 | TAG | -``` - -The data set includes the data collected by 4 meters, the corresponding table name is d1001, d1002, d1003 and d1004 based on the data model of TDengine. - -```mdx-code-block -import DocCardList from '@theme/DocCardList'; -import {useCurrentSidebarCategory} from '@docusaurus/theme-common'; - - -``` diff --git a/docs/en/17-taos-sql/timewindow-1.webp b/docs/en/17-taos-sql/timewindow-1.webp deleted file mode 100644 index 82747558e96df752a0010d85be79a4af07e4a1df..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4834 zcmb7{S2P@emxl+V1;a3U?;+7k^csZdM2qN&7Dn%*x2S{YqIU@*dh}jm5M`8z!6+eQ zv|zN&f6qSb%Q-Ig^DD5^D?Yxu_Y&yl=0AsYuzv1n80>@#v>K{HxHS5A3M zYn^h+>qG1gq3eha1Bri6{L2iT>0rFBKrZLik&W{ORKvle@T+AT2;)G^BM!SzUC7W4 zM~bkU=8M+4Wu7*Fp3-|tr`6gOC*EA+vYpHlh8Q?>u7yn^ol6rd6b4aknLa+JOWo^b zR8KFc27}k=3om#|dX{gGt?x4K(TZ1k|2sZMv0{2S1lTVF&4w2xNxw@n|D^bBQ|V52 zhp%x))=aia2hG6AKeELPZtq^xA?oQ7?BOsoDX!KzyRrA6HCzt~_yHY4Xt}hXqm*aQ z!vi&?7OAP@Ob08%(+V#RmRgD&x(|$ptK~3%ug%e zl>o22*@Tpc9Br@!5Kq{8_>sxmY6K38uo|sRY|=uoCGm`B#(Cd}1&d3ckjHN|7Bq|v zr9?{o00%TyTEEYdVMj!`G>xCb(P^U0FCb%0(;)W|D|g)k(!GF^h^iUuu{vE;IrK)Y z|5_g_2ty8C9VB1om5qEg0bY3x7Q3moxTR9haD;%g! 
z&Qu7mhFC6Fu6G-wj4NRHtvi zr>tDK6oTkgH|oP&_|Pe(5?OHLapQLhlT3LFzuQzs3l^r+=6B7Su9d$JFgf}WitLM) zoMksrG4l7q z%ihNRBBoKztdOCQ8f2}hooAZ#kiL^Hx&!SJs3HqgAeQN~ebRUO3s2J%j}tD_>`fZe zFEi2nl2~IBkh0$l#u}OX(rQ}+g&*p-HlFwhv|FXDV?xlIS(@6sGHF+3AD{_ zmNUm7pnMDd4;2HcjwL2s z#jQ6)kXc7Kg6YY?l{MPe3T82C85PXg1c?R<0-7(>&?T zc1~|*&k1rR9j-2NkhIxMI_BhV??tTwarWmCeXz2EJ^Xm}x)$-U*Ksu`Yfpq!* zGM)$96|*4^466vDLP^&(c)r+H`nyNbF0?(_v^G5JeD5@18f4c@shTF;%aQxEjSVF| z7;|<~f?UTuoR+x_{`esHH32N;xn-KnFb0PoKAb@s6uJ+I=@A{wxVL;SQ}O1hn0c^J ziOmU;W{&1Sqh9kv(31Cek}Z6gzPg|DKBUxe6l-D9@J-1K_bhLoPn|uN(rVd?oT$k+ zxuG3Jfg6F>hhJkvKzpSvK}tc$R1A|FU8)!qBk^|LH{Jk_Jra`(7{;$!Q;0vsubIP@ zWfyhN4W4kgIgf=F1dS|ScRSLJl{-=Z+!;X51+w)^-ql<^>4!hB3G(2Jon_8E4zMq` zW4);Oo`_ak#;Wt+wK5%`P4R!yNCvk#v>~`z3p#`CKAPpj3D*^9#S;;T0ZwUW~NwJT>X_ z%q3yKt<}X|dg+T&B_-q^-_%o<8kY8(saCnMlGs6frsvLnG#i3PpOEM9H&u8G?a{l= zevz(?!l~3zTv8>$9*PPdG8zkGoLn4y51?w{=QQQ>ODY~@OL^=PiTB(PW%Rh|-ePc2 zzJW_*w&IJ2;TsdQ<`yZ=#n}X+fKut$|q6-cG^LTYs!QKWL z4F+_6!3{x>p*1FOy&SLFkBLyzgHh*ajFUKp(?mGQjVW zdukBGinVj+v%vJ_+%Iy|1;Hbz-tMv;@ynj=E3fn;J;8D<+%9LohxCoqcVz!Du*WwemLQOww$uN^5B!*w$=1$Wu8)FZV-TVXW%&1 z@SJbTxu93L(ASGLvGKI28%f;DP4y+qWU-1T>8gU3%@a4z&ebj7_HCPXdkpz3bIWhN z?zadUNX)Dpf8OlLJ@0wlM48#B)tl-->GpE`*m&_UGpsCVklHz$i`Rw=OVJAft9QsQie7JAE zO!VHXt>2vrVUhRc(3i;G`qE3J5iU054solZf;h$H^t?)Tje=@2Cf)Rg&Gz2zTCd-o z-#Z;(TN5@{`EX!YAzyv&*G}*2ZZ3rL3n^#9By{BCJw>-3q8yAW!ZYYKM0DRU;TOEE zg?RjfBjZVB;Tbxaxc}g=RIcayj&VUU+1mQF+ldZ!$rlg)A;^})h(X_Yr4|maSc+Z2 zH>n3w%f@`De=zA^8xC(iKmI?M@PAf7Jeq0@`~G?9{`HAIS+20vSdeJ4mPU^K{}sRe zqj3>!S~X&uiMZ~uH@nk6Ed|3EMPdCmrl%8gt;+2;I{hyKLpZ2$X;HJZqbK)+?%iL{2pO$E+-Mv>Haqf3Y$5r8(%wg!#kk9Q&(1i zngQSMAFZ?%xG2@U)K?lwZwytwZUiahP+1HFtSmOzQw;SLsp+<_Y3yss4^n&=CqmVO zi|aSE=Ecg=BtXuOo*pWV1zhNkbaO;o?p)9Z*9mLS2#!^O=026ZELapR(e&rUt^o#9 z`xj$a8FNE3T$m60Bc>V;!HStGBkScBhH;W{#dM+cZmld#F7V?gHH$;dvOMMJYsDV= zM4A6=II4yVy3?F~w7?rTs2Ue^tUy8oVMLAe?3+Cs{CoNy2Y=#+Xq$@aBu&!_IFUF-N| zTh29u&!yunDQ#Vh(y!qPi7`G&Rz|N z-bB*Jh)N|xRWWXCXxwoJ^`0u#7J1hTrH`w>kGUeW`2pl?Vx3*UJ)bT4HnR}UN$tJP 
z<=U6px?{vmD$eO&e;=}OeN%4BDg1Ry?|&||)G^M;qRgdntj*WrqSU^zbZN6OH_;Qc zMnu&2Zb@1!ByN25M3qh+AVa3%LTv&S8Ex2(6Yf+ce~!Ugwk>ZlpiyP#TwX8YQWNhS zl3j}{iZxFRj8)JRGa|9*;}kI7{D3j%dm}WZgiX_96%5yBTA)-feUk{RO4Lke`ZS6; zGcuV&RhQfL8AUJhsYh%%%N|TMt*B@l71gE_bO1s{!rl5v`I7k!9zqMs;E^5^U;}wJ zZ0uh$gpqP9t1nsUi2B(}dmeeNyB)RnB2II{0F;nOt+(m;%NR`(k!N!lbm3ec)~VoY z+J&Azk2Q2?eWKY-KMUh8>dN#?)R!U7P|YJ9NiRVB%dMaK*vOmd3s1$#n5W-cLogp` z&w`4NMloi-yfEJ}05@Px^d+41MU8nr>Zxkoz$qhr(?O1|#;bi9&aV+Az8mAr^zDL= zJIa(^*AvmzFw>U{Hp${X@Lt$Lr!W~Z5;*u-GD7I5^s{~$WY3CzCFJX2UMDesom@~! zB&dI?j8pD2L)ju56ODKP$c%Tp4gsAz{}QZn>|9~_F%(an={QYXp=`@@CY=3@rH;sk zKb7#B{Kx5Kn>Dio-H8@~b&1X4BSM9M(9c)fI*;j^OhXSD@Tv~u6VL-f00sYk{~c2` zFVDX+Q2Xr~MO^$IGGf`&f=&D#Tb@j>eWu?91)o?Y1Nn{e!n&&TTpfjqtF6cStKV1i z!Vd#aW+JKuC=RVz0=b*!QBT6NSnAHIG?772hIuFPfn41`SK z@YiRrD3V5f1>&P^A#RZ@maKM+)do^Nf!~C(OKl&^cIigZpYJr3p_peuDT?+jpEkOC#fO~oDkITnJsUE&vrNM?^Tk`oCq}Uz zKK3XRz~eio6x>s{IvrN8@U8XWaghQYC0ekAvPO6@lOmnNrpn;n;BMGPVQ$#b#-0L@ zG%>!j_{o@XXeb&RU_RMc6mdA%0ZNs6%_JC*QVrf)dhD+4>R+ZNg`g@45)A~OW~d}} zA6)aZo=R0uLJaJQLZfLS8`O+V0H7t_T9`6zKE1%DTV^V4WjsUFf zc5>OXA?%+r_v0{w1v)^}5Nv!kN#BlQUKymRUBgiX^EoR{iehObm9Lbinv49yy;I5^ zfC#1b>IDaCmm)>^!aG72ED?cc diff --git a/docs/en/17-taos-sql/timewindow-2.webp b/docs/en/17-taos-sql/timewindow-2.webp deleted file mode 100644 index 8f1314ae34f7f5c5cca1d3cb80455f555fad38c3..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4394 zcmZ8jc{tSX*B(pB@}&^jRd&g~H%eK@lHJ$7ghYdkwZSkbg+zofmQcvPFImdIG|4v1 z*s_cmGs9?XG4JU2{;v1B-oHN2b?)=r=RV7Gu8)PGp5C=f0D!ITT{9aqHRH?Fv#c>( z8ZdQ!r74;J#YyI8sJhm5ciJ{P)@z*1{ps!_3yQwnyvDB98l2;ulNxPf(Alg&cGBmx zUv?UtpbwKJmF-P@hfSCP7)}q*A>)fSMw{Z(@T)ifF;EO7*q}nzUxLw1U`_R=+I3Be zlQqQ!V*NPhG8*}*Y_4UVh(&t zmHspiP$=rEY1UK_yZ6h`F ztS+5AUsXxF{zBdN{=4!`=VSO`dP(NJ$$~#x%nYE zzUe1gUteoJG9HPs_vA0wB=DvGTe=gEK3-6YKb}DoN-Nq#Bg9H_Sq7K2V$(u6ukINr zzqgTlbRq(d@PGFu5}%RZxLLs_P;xVzdc3!h4%e%6{lYtWAlcgEUK_R)CFM|mQ!|jS zNy@YZOT;Wbh!(tl^jXpWz(^5wyl>_A&^Bc0CdLg!`e1v)l;rcJcdVKLK(`Ov+x%Ta 
z$fuk#d;b;);=hPl#OSB;*_JxVemYMMqKopwwYHt$HfWUaYbA+&?uR}GzmeRTsQHIx zU(6SxJ(cfl2u^Ross~Dgo|V3};JLvy(kxf;^$lNu#C>--Q36eR`wU@{^F?Y00ferf zezh8@)yW3Bbyi$iZp*)1zY9lg72rI}*ae+l*Q(t%2&rW!Ab6*!4toZC&zbZ2xY! z;5BEKl_A|;?jTc)f84CV$Jr|HX^rfwbo@tDlSDgIw?}EFQ<~1o+l_OPn?2@B57Mzy zZWPC~`ZAT9T^9cLq$gU2SQ=40e6A5Iqx?H{ONExC8E(p;bs08{7$l2Sop?M>@1UJT znm~A%tg1IEMO!>zm&P#5-k+}d5m73-e#SDPvB!D0tX4H2BZ_wqr3iyI!o3D#o>y;t zWb+B0Lna?Jw9nJ&@w{v1q!`43y}$aAG_obtG2 z@1jIyxDl~BIcoBL6O!QEr`|9F?cYG>KOb28hfpjC3Y`W1z>*Rp4N`Mjs+ebg(qV8{ z0(7PB=T#Eb0!kq-cceJpq+^a}qcm3oU@;U$NivN)O<@xYm? z)fu_5Z!Q_kDRn^qp$P2fZ}9c5AXSdZ9jtj?n$3$A7F_|2~MM&X0T!x$C%(+O^5a)$eh`c?$#GMH-8aV~%-#+h71h|*Zh6TNbHi9WBL}|jbaCM5fNiog z!RuR0XbMp zb{ydR6lp34YLNrSecL=G4yR&99@`5V=ZHNmQWWaAL5yTre0qFtTWpseV}*BP5zFsL zk=Gd-`C$?&-T#EHnPxp?j6O@#S)5zY(|6(N3dg8s&waBYz##J%xV?4fDyazg5sw>bx13s88GRPS#5SlJ$+)>VIkP53HB#Vx(PbGnFkKteOmO z27fxYG=WhL3%9-@ZV0&!xv%c!XT6C!a;xP?pj8+bGl!?83-gcV*eyKCY)eESYx-;#sV-pu{H$YSym+;#~5-XuL&zm;1wx zFI266AQ^vmZD+_`b!w`}`;(!>ES4P<=Ck2CjG8 zh7=-Cs}@32WLfxYT6s6bft=3_pq(Rlp)FJIbt168MQlCj${IJptgQI+ls&27RdUi3;=rl9e*W_E!rHQn~7t!Ef30yIF^m53n1Q! za7FP}&wdFqJeG_bo-?m&ubSnr>&9f4EEqljOn;hNqcv-P%@|@2mODSI+#RwE8Zf!? 
zsM)dhTovfmv$nV*0Nv(fcEJr%CjB(_7W(#~2-ub(Pcvrxu?jYGB@FTwK$fQbNbBz2cti#x{rRbVHpW4}Ns8y4O_kSA24yeb&q$7L2rd za9}LN1Ahpf&x(y1vtdt;|L~52ti19J zNU_iD!@aix731059H5SoTS*hkXRv+L&ew`v@W9f&H#SNu~W z0uIXs!T5k^L&MPRZuEb4gia4Lbw}v@-4WU&SKZ(I&RAMZ_I8HDF72+dYB4Q?xl|RC zFRrhf^e{lU?=y9}D9#gk`54p+CAy>Mu=MweR~I9d&eOR>zB-@w8cj$U)!Bi-8;>p= z(UVTz?taLxckaT+XzTB6Nz+*CIfHW7Pby53M9X4_)2koPH}2gugsr}-Fa>r%Zx|eH zME5s3M{Tz1;?&jTGlBj>j?Xnbb`atjwOA#Zauf+NJ<;Z{20WJiC02`F>UAV{1P2KC zT)Phxy4~<5=3+nBw*NU$hg?nHoL($|4gpcQx)c zZ2vRYyvToIFrSmV7Q$uQkN8S2k%r7rbL)6?7tes z{-1UDpr#xJFM;`csTJ+k3o2(K?aBPCtd?zdtFGg(ydNOlXoG685DG`|#d{PF`rV6st3Mde;*QKNY(fAB=(oL)u`V$a!sOh2Zz|OT zct}0Ar~04A7oiIa@AK%$9T~2yDAJdpsojD|CQ!m%)z1=wxbO{DsrWJhWb6`N7xE3Z zd$ct@G%-apw)L%#ARKQ|>{~t8{`R&>LZXXRJ@$L9&JS%-I-GjI?`#ib^{r6f`L6JG zZ`F>KGL3C%QoA3S|J@q{oL4lEp-L+ScZgrJ<@kB|1A+#pt|XosWHtnVvN}dCdLcj5 ziBOC*K6=Cg_e<>Fo{l2u{+#C|YVY`?) z3lS?7twR7GR&@&1V+DE}ieq2?#;HSo`^-Xu6{3M3)pAf8wUUpK?0@S3TQ}pz7FbK3TDf6s^&Dz-6dcgQb$3fGKKL zE{rlmS@Bb!7Ogmb3ZH9^17-%9{qjE^C_Dn)>M?&d>4eOPMPtQ!@3o6l+iu8^aoPmC zMMFh?lsxYV&lzG|855kct%Rn#a4ayF{+Bnb{RpJfV}AaIQvJrm-(IK17*4X|5ABk! 
zZK3Cz(&JPOAaRo!eYXAMX+mPi0sox(*nuHmKcf1JMFj3Yx?YHD3nOj?kx1K6bJj1(j;Q&)Q9OLZu9F{g>L`*5w8H)wjvOzlP?&u?j_b6-TI( z%8;?p1miESjsr3+XRXBo3Dg|RjzGBg2l)5TPaYx(LT*UK@7m`~yVIcKqv}uuwL2F} zuI?9=B?9-wdVQGw&yo#*`W>9^umqXK*vmY_Bzn=}T9=OpsJG);P)}svG1g$85rmq% zVwrerR-BbMT&W{l-^WhIoU%$?DG}H&q7n#VeJpHnLd~7AOnQ%oK702+M6Nhuzr8$J z=?^tqmXK-X9y`E==Sm0I_aM-V+UJUtR-KioJFGda<`IzOwjcRsQLp-9wUUt$<7;C}#6j^Vce diff --git a/docs/en/17-taos-sql/timewindow-3.webp b/docs/en/17-taos-sql/timewindow-3.webp deleted file mode 100644 index 5bd16e68e7fd5da6805551e9765975277cd5d4d9..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 19198 zcmV)}KzqMZNk&HYN&o;?MM6+kP&iELN&o;aLjo)SRR@DclEUo1f7k!OK`S64CV)Tn zQFJi6L3Uun8>~~K$S7b35D;Z{b=x7k>Q$fGt*TOSL{U`C;y%vc#uA@rUXVXxy#fbW zxW!-^ynq-PfD?ABil3}PEcXAun`C8kSFwe(hs=7|0h__Hllb)a`}_U-|5V)~gU_@g z)hRQoHb5%HRpc*8T}N`|Mv+Pq9cySsDoIp*6{#dqwIix}6sbJ8=ZqdT=W9l_!;d4X za;RCr-9Rf+JA5BhEX62NyX*V>6{!^qs0~~-Uqu>)izCOEBdT$z489#v67-hwI&d4?4X3KZs6tvb^k#0sXWsG2H*sVai(2~`xPiqur~CrHgw zgyagUrm3mgQ}aw!4GF2LA(*N&HC20R5LBJ1iRiy9$hM`XIz}`|bl)?9a9MBov>R=k zBT0_`%918Ey|e7VIp>^n&N=6tlkKX^FQO-6Yo!!g1#3T`&ia8f;%pcjTFrBz<`(W727B1vnWMHl&D*fERUYxltBCZgC=7J0GB~F{7^y zE43iewzf90{vYCf|F@dn?I8rDL?gCq8HfCU1lzXLwmOmnWv&Be%MKUkKdTQ;Ea|=e zJAx!hQTp5+C*l2YJhIU3G5nAJ?8P1i{v-ZTLsXg~@+0h$&0%YKJ)VQM;2MZp3Z zc%6V){=Hb9Rzo60sA7n;s3FIY71V8kLbzl?Es7ZGLuA^tD_T@gVMV;Pw)jQ~@v7(SJNRuQZ*IIqCJ3&>ykI~lPk zQTL}8(+NIck>%e>gzp~soj*;v@vC4##Yjevv7K5VG(-u4Hzj~Ko<*4cmq6gHV2Pod zHk8K#I;=+#foQ#{GjR%hyg%PaJ%x}6LA)_YHO8JaYlO%YsewF0NGd*Yz!v+g8{8nKGZsQLuiwmoY;SvhgC z<=+Z)F*W?FG5d{JVze~8Ma58OB_JkJMN}=s&@CYG4Qr?iNF|URbtX;~_vfjG`d+_z zXb86Wp!6hWdV&^y=6?|aL;thS#Nxm3FYw3P82Mt8s6_&O$NpZQQt`%T-Wt`YJ|Iy& zYLtPR>%HMci#g;#!e*ELl;GW%i}auWS(c#z#j2{L(*{DoFqpgg|dX zMG*MBvtx6Z?^FWt4Qvr#uFp;1-_vBE5&0~jKeF$^F>_aGD563 zJPfQt1Vc3pSrG+6?X~Nn_zT|K{T6@mg3Va$zp(ia{39D1wWW#iSCG;K_D>RX<)1pA z4{Q3TjI_$CzvhqrL;k8H)DG(YH`l!l#zXRV#_}%wYd74`8PxurhyFRWJXqAMxhM4( 
ziNG&@v+eRH{L9|je|7e4K)t`if7J5p{>&W{zWh@BUpu>AmmPv&**<&8ue}QlomOx5 z=ZlpuwLSF?gop{lmMZ$YeR4s_=AT*j2mPV@!s6NO-EX(~uW0SzA87gq{4xIoc6sYJ zeS*TxwTB7+N|)P!@qc#&w_SDIAMw{4g_`?Qf!F^T_iXUvrGBE(Kj}}g1OM6Hb;Zy7 zGgSjklpp<&73JbR!suQT@BT49@!dbk=-(Ut+&|&KU$9_f$j0BW!H;uXpMmEhzwBrE z6@J{8-{pN&vVYzD&#daho!u z!B61+aRltI=bmlmJ`wWv6)TE=C5@3UeRkMir?>}jSO-KHGTnmZ`;_##`0P3AAL~;i zZ)StjpG`{Nyq6ahq=xYazkGUXaGlW;{|)|L_cw*unT4=y_358++j>FK(gqgrrv3+d zrT-FCfuH%)n?8*@lRsklJqCd$l0?6B0k{|<rZ>9e3A#BD9N_M=Jaw^_@X7V zv*}c;Nbd`pQ(5hTbT5I3aNNvp zbGprOe_Mj+O7H_;M|j35ceso2j3{Kkg<9H{j_x*vs;PfOAv^tnD9;spQKaoV0FmwK z#sVRH_spfdy3Hfl(iI?LuW9B6{(fU8L$o~oHfsg-OKMhXk&{uhe&z)tu-)@763>SJ z8``aR2bak=hqdm6<0rVcX~6BT?N#*bOL)YX)XXeGEqq|Ach48bkMF4aUCq2|I?3I} zi8o_en|eVC&U|6t#*G{QleS*ES4c!itrq*e?Qffu_l+fn?(XiQ5FK2oba7R@1xv~N z#*JUAx8OMnMyWqMoOqIApUF(7dEUNO8T?yw?r&yG=g6^{R(NWsDnOK-$pw33T2bs% zyP3r|amzr#ZmC;1GIcFjm^IzV8I|wzowlC7Nzm=CC!l&|>W3Zv=y%GXwwG%RR(Le8 zG05N{ikI*6YwVV9hDVnI=cug@qF%9tNOSj+05u>C@e9B(o zYDIV6EZCRQ|0ttISZj%d+U`6Ai?5eyhwLbRYchuJ+cLe0Ce^Ksy(-;HQGuUzrXorU z5M610(VKzCoY)>kOVBflTgv>~nKy?sQnv|GJGB__IlmZmemR2x>)&AH$d=;A0=4pn z4v&pnYSG?t~W&>c@>+t&FXuW2>uX&(MfnKwuJraDXY27PU^YNx7?|FFduLF;rcGb_GNs#B9Lb5ry1LEZcuXW!C#MBPH{>~wvs64`p`3+rvE ze-0FSbRi=x1VTmhlh>XZjyVI96(8I@|D7kZ- zn-|IQR|`^Z^)Q+L2$c`EpVQUS>iN!fC{5xy^jYUzH~dwuO5jlVfmCXA?e$04Sh_oQ zb91}=Y%M(t+yqn=lJ}2rE^~c^5!RLFmv|j$pN>?YIM>a5;rc>q{ze=zoWot`9?oT4 zUeQ-UKWtv~#1EG%&$rccRE&7%HfxR)tnp;S9tW z6b7ApD8Q-;J**g2D+oz$+HXgH!}$thCrk!aLPW40CT{`(AJna)BO`0*jX+Sa*#76O zGG5Vkqp>lr{#hb6MR+{s9$7WQ5(Nrk><$D-8dAOC7fEi?RUp|fpM)C9TLwZga_UeK z08J8hE~C{}v4RJy!m6=JP`E7u1>G{Cl_)U*-TE~T*Uv;K7DGdCOgf+dNI^po1*CLW z`~prmp$gb1A?V?`)O24)h%WzqyRvFfwo z14Yd-ykm2l(T3B2>E*OerH5qGJ;4=Lm@u6xNjg=67+bHnI`-6MWZ>WQ0i*P~EAYR0 z2u8vH#!eql(gg)A^JJn~P+mKK1@5u)qEr@w`NzHf{G`bPe+1)E|t z=)S}7Ai9yqEcY0C*(W{%f*>eFjK;jVcfYsk=@2!RmaK|Ko_nkewKf_VK`{D48O2A? 
zwWJ!H*#}P4UeGIXkD(8Yj7J#NI+YO-6Iclx{Cgu51-h{wufRQKMleFwoIXmG3SxwG zxK)4B$UF_?U{~bGa*v(&gfmiBGUklNyCZ^;;t{n7Mhrv{py|e-$Z?OMO*4X)Pc~Y$ z3R+^MoYa_Jn5t5uD}vO;4NzZwlAiUOm{eL%`(h6H);tllYZbCCTaG`E8Fq~4>)FA`sH7XJC^sa_t#H64R=f***>wpeot82K619dyuFvn zsjJRz_wT(|bMN&Rx~jjufBoze#>@4$&u%BU@4fsc_`RmqxBHK^ev*X>-2bw^z1{C` ze>I=pU!K0|gy!yFbvd@S-e2ytdwpR!&wWS#>~eql`tQnfPygDkA-?Bu4ex%=Dw8~Cy#QysV?U(L%cXSplrt+|g}e~?=`z1DiCxTmJpFOP)Y zqsK4PyEOb#hlfq#M#bCo{W804dH|Le zH)fBo%-+Csp1Zl_(D^H7c4j_@Ki}C|oSoI&D~tb0S>Kr5Tv=J!>-RpM=e}}d=hpM* zXMQ@*ef)s4ym-Eoy4k&~Z|v9Ho12T9v$yl?Gm9Ohl~=C7oeR?)+LhUzlfHduXGe2y zY_9AacgQWG=+#9niGm9@C3B5;;Ulv!+ube*^c3J0E{SFISID{YW^AT!ttZ4tGC@ad|SuNV%uJha}U$1kh%t2f^Chj7_$eb`TLO)ec< z8r+xv-dei2Ft~UPFL!ZrX|Oc-G2U}wZgOd9;d$9JIX4*0ZM|OW|2-J2ecr<9$K-S7 z21`qW!DFVlzie*M!JK@j__AYz6!&0h>&JF=Zths8HwU>o&&{5tt*s94OZt}$23vy! z_r=L~!M=Lw^xV?I++%We1@14~TIwJzy;Gih@RGrQE_HITC2%J-_r*acbJy^4bLTqQ z-TJW#+!Z6PUbr|nIk|N^*LCrdhXom zL4td*^qPG7+``<#>4kUVJ%QTy~@rMGp|xrXHqU0QqVSbQ9P75j$>P`i9v?D39c>Q)5^u3b!wad|?R-GenoPz<3Okm9bcyizqiZV?YYcI>>M zM%=`g#gi(=kgcKKnj#Bi6M_e^STvwJwHqhi%mI%f+AN=N#K5o+1)?tj&qP?P8bXG& zMdR_4WJHJ!T+vdrr~Dejbw1#A)0@^4@J_(PmdNleV)JJEterrr>QN#ii38bG&bZdq z{DL`==QjNH=lRQ;r+bg9(sVPuXm6!b1jeCEjgitXHmN{>gsnt`YB4h=B*Mn1`IrwO z45VD(hGEr_WT1IzV7-MPS431r)M$G#bUQZCE->;{h)mIKV$etoCs`uHqhVkL7D8IC zh(}W!qtiz!G0NYi5{lIrZOH~Q%1VqTh2R4r62Mi>%Bi<%%k*GFCA5%aNV(-&=ltT( z#&ub4E^uM;@t3%lZMt-t{60Fx3H*@~rXEI`#GLJWXYfL`le~Cny=Z6KLoJC*V8OPW z^RCag!>G|26TeM9{X8KgES)~M=HCxQg^Nl))6)o?)on`6 zl?c(GBO5h)E)*2qrK-(J5A^L;P|l zgEKGfpG!?uY^s-b&-f+vxOmKy9MmDR`|P`@sH6eFgGFzB;v1(wFpzV!Sb4+FH`P10 z(Rt#bV%@DrhcV@Ty&9Ek&v2WCB}Ml(K6OM}V+Sm+ncng~TXQn^8x_u>jfy|dY4pC| zI&VYix_$CC69#p=nso&E7A?NiRk!ABm#>*S?;E%5^z5Aoqo*#vXkOB<_BSqGh`pum zL$m+Kt+$ipTa5csSKZgBz5<0W6NlDt^*&$zyRY7+A;%6lbNpKiZns4WX3)I&rk=;S z-xRof%47B3IG~B=u#yMs-CXj8*-gL)F1UDgeRy=hiOY^iA7q6{-1%gs9e&(vz0Y(# zlUVFabqfvxG@Mn=xz-84Y&JCbU9;TWyITPm2?q6}d#T2$`}>=!pSo29ocAdZKH2Nl z?r-UX^-S3x^^epD%x!U?-7d4s1nG8=CXF6!bT8Iw&?Zj+=};Z*w_aeLOImbKdv$o` 
zYr3ikqE+7Gf9x>b;alc#<+Cpl@OQY{mMty1qN))GWFt zsn99iN*`L3g!gh)I3TqSc07BDB4vu*#^K`{3t!YdP4cj9*+gG2y-7}CxaY*5BO5s` zZz>1mRfWloF5%I17g&{ZJCC{r%8nM72>rGt(I1vy3xsq<&BCo6LRu9(NjB6PaO2>E z1cf2xnrof%%lQga;Wi2-0{)jrXOElmefl!{RH*wKwhjRb=F=3H&A{s;xId zkN?J{hTk4jrg{o^uHG1yntkcmH6SC~uk8*z|D6@<2pv8*_ z&@+9S`7?wn1ubLg2K4NbTYx*S7uzy_y#ud2ow@-V7@_b5+-ux2r$(br+g)RAx4urJ zPTYB)hPnCE(-F#h#^n=;RL?5@S9N>3an!Ea-Hl4YV?pSi*H|xIUuSD4e&Fk(E%T=5 z`@^nB3X7T*MTecOS~Y)H4CvC-)jDd#m1vEHSnSi21~tED6@o=)Y-j~qp(N!_@Ce};(s5t& zUCP)|&sN56ScqECNCTSm!WylF7`g>CiK@&W)9Tj2t@JoUGC3(n1xxD;+0V`skl_Q!ma%71;=k$@krp-Gvzw> zd6wk&JF3r<)I5Ie#f>(%cDJ2!{MwoY3!3J2-uUX!n_JXA-_^P+-S?T2<7=(B1S^_e zm$feF=`wlu{;Tj|BW59U4-%4|t9|dQq+beng?%6`VG?Mg#d=}2DvAPYTAU&)0b%&B z^GPqOdi23}c6&!|s+RiKT*|p`p}u;9!NYg;?bFGx@>J65fZqmzr<`+cyG4i=7Jn4b zcl4Qk+qlbBTstiZhr40cvez83G`cs?HdP{Hlx<251dXBF0vZCB_i6FecK-9c(4vRr ze|KP&n2pvxof?&Yqe#xITQbMw|1kCMT3LR%fjQfH$un*h=&VYr@n;@?$IRnH{9<== z=0=qkmC*q1tX=w;x^Ig3an^8QpTVP+%VjAZe|gHH3ew(9zWn9Ei?+*UdEuq0vx{yN zrW67;HO*&b5h>Bqo*il;do2pR5oQW&bw|oC(+~i6&2y(xqF@lgo}yYhVtsOp%akzV z6T$jlaJnHN^vKtnVqxs!?uTrj3TI_5QIaQgXyQgyzw^R^KGolj?7IA#cD=Fzx@Nv6 zYI)~<9v$LV3XZEc`6bKvAX=MVG~>dP>+U93&?F!za&+-Isy2phLyHHZh3;sJKdT0K zeqe`HppcCU&?D2QInh%0-LqYvV^zi$UB*GKO$1M#4oGPw;?S^noIZaC%2?N-UFes( z-d-%CSbDhAUr|Q7YxOL5wO0z%hm2lE8L#LSXXC|%2D|@Sg@I@Sz1E~ zMD5+sk*-_y>JM%0^Nim_bR)E1T7!XYWdns-E+s&k=yB@I5i4wI8kwdZx6=3ScfSUx z7#hwS@Fo;X62LZ2eH75)lB2#;o3HYnzDmLL{qHhFVG$p_Sg^04Duee5k&f;hp^;Py z?6%s{d9MpD*nh9BqyD!m5d4BB5s18)w=V!e-3nZK4#0xcXBYK#+Njb!^H!9Ih+Yf( zDnz5^Tg#GNhU%i43WsJkC6{HcQ_+{d6R4EN=aXxm9pgQb%C;j_S&}yuxMtxM2y~kd zvLrpyFLQl`X!QQPkUAr>)Y*$=sWVVp=V;-WPamPr7v6~l>eWINJ@E$-hJ8)Ti5p4*;Q|&{@Z## zIc`h{5rTr?*AKkwmKk*q5j>W`Ct$7Fv_oj=4twd(IQ9Hp**Im`@u=|&sf9>K}x+n8*H%fJ}ie2C5t~(wtIRrSZMMDsP zhf=G-YE{>=N$V;9jj}oaeYOwVd4a`#x)mJ<%_!cELg|Wlw5c;!zjyDZy|02U>6Y8q z?c6_9*vE~)lYNz2)4g@c1uD{Z;(>!MAJ<(N*yp1jJ#oNxWcu_2FMWL6cFVfFA=hjk z_f^2Fq?%@>g@<%sd+EG_S>|3)SKmv4ZZz62w^#iz2qyCZf~U)sIE%I@3mi}sy_9~h zk4J^hMJ-Vr8UgoJ+@I0C~3yUC- 
zo0<`#@ZEFVluHodOU=xTJ33Od^Q{GL&PvMItcmfs`W%P%5$zC zAbj^0c`X)33PX>~%b#-(N~<$n(VSOMZFTEbH>a;6@=Q*;(UV>kk^I(K`s5AHrG_sF zoCapYLMJF|wKiG5e6%-Ep?)HZO;g*wq9|NJ)oosh?`xYD#H<}bO%-^&nXh5?7Ne>_ z&C9N$s;E~WGLHc~VTFdlOR+AW1vlI%g8D#8D_TncZ$m7Ev2=@A`u7mX2DUU5R7}yP zVyTrxu>!3y5AtY&H63wOf7G-UNL+$sV;l%kMU9FUMuyhZGSvfw`2IaWy~T!Q1OVEw zf8a_TRm_lzrl#4fT*%tB{ndjNOW;I6SDJ)hXbPnQcSLbGA`NdMkYBmHag|yi6GXW0 z732`DdL9vX$CNY$L%S&O7EmsB7hP?&hOYYWfab)6%Ys%=xPdckNhM~+%BEkcM^>LkT-ffEgCqm#hmre*0@Om$rJ zP@2SN;hzm|oi@=pEB0j|^}K(#Qat z8s^fd>Hbtei%c(E6%$z<6rCsFUBg-6Dk();qjb8)FMUs^Br>HST&GV<7ShnT%%uW# z1)9my^HPQ>6%A7g5BanfO8pN=03#}J+j?YClxKsyY0e*n0EuFwcHhy%Jds$(GXo1O zXaiiG_ebVrR(Uyc+6Cc}7*3JhDbo!+!<`~F=}3o?>RZqCz-*Nbl!+|>fpToqcw8tY zwTFlDTjPQ`jXHi3_L#8ba?UmKkR$?vxjj z`a3j+6#?ep>JA!JcUpxiM~#5S>t;BRNuSC|kpPJ$WeLY!fQXY zW`aaXb6pW#XV6L!h7Sd4$^eF1LE`egxV}QKLccwcP(8OukWwgkt3|=pVAQM}zfYJI z!%glXhbGI}+6sOtML;O-nnL*166OmXJt$EK3kXY?F?Rs1&?Y%cMiko>7Lqfv>n#&b zPjTnv_An?-f;Qx!F=<)pVhyb-v^GA%$K}@@9GQ+t>ZOG@K&9RlnR{AXli!#S-Tq2e zK2-Il$gY8iReH`VV|Cn7!7oDX;-9OzYih*roWXGq&wNqFDUPWj9Bvhcd9}eCo-(_W zEe=+#Oh8K!Z3{uVXgXL>ltl{W0-$v@5|kl*;`e(6G>&`5)J;gD;-t=a!}Y{;CMY4n z8f9a*E{m@@(?%m#ya|m0#x2y6dCuR4Gc%Jab2RUoN_`@&+N3Ek#hntYhbxrQ^7ATJmLogcZ~;ZKIU~) zg{s@?K6P2_`qpIbryFp}ErvVfA;Sw^eH372zmoN(qN=b<#VmEeb*~qc%&CY%g~OgLZrk2S!&%MDI;laB zykGvyg-sSTcTLTEpPm0nDgpko%#9ttUg|PYMFTs=lXtd%ZNG8#72(Sl6&!8uSG_T$ zc{$H*e%!qP*Je(5+qHA2o1IPY_Bgs=XQs7LNZ}223(hifU(E_TVsP%T8Tp%uV(12I z2($9vY(2EzekB!{-0Z3I&v{2`wi@pqBAPeqSMPr;ZX^EDas6KS#zk!qJ?&lFzi{!m zeci z>){Dmt)lvM+F#_(H{^A0^o>_vjY5?Dr49klwgRN$l6lRN?7eT1RZ@&<2qBocqS5V4rbMTmH8h-S=yT z;ap14BE!<&T8Mbs4MUdJ*o)>xn~@$Py6P=%Ho9ZOljbC2|HINl!P>mpS-Ocrw5j05 z$i9+V0CsEFEcv??r2NvIa93(h2Rn6_8z6iwRUbqJi`RQ+s>)qcMl1gD`($oc1Z?1R zb%esGn(%+uU5m^=7_iio{zPV&Al{yQ3^QMZ2uf28vcaoYwFe(5v2-KZd2Sfca<#xk zGe^*4Tvs^Ujn_b3swUH4DkAy6xu#?D9u!u63wJGrgRSyJi@O0CQYl{VYtwq}vm0MI zX$69q*oB#Ty2+@b_JkT`VvtiuO9yzg|G8Dk&jA!l-R&tS-!>DNrt#NX;06*?mnlh z`NVLR@uDpSh3L?>Ik_iwxI(>jS{sPhGmO20t(SkP48AuTkgJOR8&n0*534&%3F>wT 
zub;9~hz>gjU6VIGFUgJh&(GTVzgZ=T;3IUgTSmL}b@H!Voc=x>(C6+(rF!S}*uuQ& z>7+twGbBDIax9+zP@XRacmhelL0^#9-fPJKH z;SHS{>~bJ}?4*^0R#w2zA9LG&Q`R+gA9mD;d60906|~);y^npRA;!w;Ikk)#JTCQZ z717T23RmRK4wZ%*N}xV`MuriO3uRK?m4L#=6@lORJiuzl zlaK7S`VC_Fm#HthM5+px`QI ztqWdoZtKyL0j_qg&SDQrVYy#uzjWyU?8b?RpkMUjHy&2HOD*W>QqDcD?6YFSqUE%A z{>`J35vy+pq?>d3?ivd!Pv%{Rm6bK-AMl-Ab-}|Cozr z_4RV}Q;6_~yVjobc4>Rqi6?Fycw%D|MG)}vWbyYA#mCSMYK#vdzDn5A z;z-2EYlWjSBPJgfK+i!}fpGEw4iHTLN`XEwB;pcmh5c($cr z`lavNT{d0wE)l#jeYOrd- zM@mF;R=HlJ{L;PV)@GP7pni+?=GPIFtK4-e`fQs`ZNADS-|3N}YTxO)(0qvp`{LGp zMHL#o8SH_sv0|W?X&qD3@FOpH76jO0TOA2kbHPQ4Zau3f-O;)g^gN7@s;EFCz{@MLP|Q%S*l8H;kE# z3W8Z0rP50P5dwPeet=H21 zi7&sf=$)~fstm7mMY{`g?f^x}IZN>|WSx-yfzw{PzxM=AY0-3Q-~DpN2Cj_+gtLbg zrk|Ylgf$#8;zb2`&J73jeKM!raQ~O@y{pF1ZG;8s+M+6LE8W$+`p)k59(VFB%fEZx z5P*K^906LpmWN!m^#WnDJ7GfWDQ^I&Ht>l@XYA~*Wfm)Z99okOEtcHSyW(um-c>hG zA5nF7ErnMzH5BZGJDN;-B-0vvwPpuCdS~74=dwAaC2Q2?m#X|%slY0(MsV@?K`O!-p=UkRM@VNJQJexl<#MXiHuZpEdQK?ch-*r1y@Cf0CZ|hZJ*v&5A zrCnZcKJ58?=gyrJsd`P5NSrHjY4`J4i-jp?X^5p_dJGosbHy&5aus9fE+EtM?g3a^ zGgH)dr7HK2aOu_G772;Ip6dse=+wcfo!?eeWXT5C=pcZh7mst@%IGyViQ>=RmQK5j zb1SfwQyG`%Tw|j^k4NBrT;0qYmytw=*33^0e+7K2=QD2jN|Xv`X53R{l+b>#GY@S& zZg!Tw>;acfw|z-P%ET)(V2u#shctr{wZ6=v{qoj7^2IdxLYBL6z~hPn4^-bVt1Y62 zB_n9Ln-U8U>wD&lAsvOX4A$al*nz+s_zU@HNk5u2G06LkYZf_JR45Q;DphwHAa5gsb`+qmA2hn+hJoyAcv} zVim)qcIx6#0&@u)E)ATm+R+1#fFygMgy3lff~cxHKTEe>Rpq?NRt2jnN3AsnNvDAd zwf`YgsdA{mZ9ve$X!`+yGu!NwXSYsImQ`|;?DZ>*95>vMmsTjhMfaA?U#+dY=DoA8FR@Gz0f{HOZ^Uk%yxmOd12hprl=OK zxAFVYhD0&S&4M>Oq7qnO^tv#R5|K9*3sJ!1M%DVPyiTX&G{zaCs$!;J?m#2-HAccO z0voon++2i$5-5_4AJx50eedot-I9~_N^%wfLqI90PbL8G!Sr@n8lw=YjnQ5S(b&Sg zVnaiZ5)rFu5eA}GmRYs2X7wcrkyyGzv1SL275YFIi3Du0?fM9-s(~CKgcwb4Wz~3G zZ56RZ0iT*NGyLxnk44Pasfagy>bKCl-V~eb(Qw)+^?4w*>oE%DD zr)$>T{Z00??Q1Jxnu|?v_M|maKJjs}!|@tB%xd8t%*{CBV%tC{&4#?k<6_&DhB48a zTZP7<_6tNh_4&2^?YlJIZS>1l!ZH`TEJf!%BtR~9-qX_0MifJ$_~By5c$GB^yYe26 zi>;Ss?`&OnE0NiY!rL`uzh!cAK0milx3})nmSUTWO@xS|fqPODAQ!vEV`HB5ZOuMn z#yzgM*rAwX&*a>*C5cR4y}rGs>^8~nhN^wG$xgrm 
z`&2-&iIIz)&9qUm=5af4Y_~?^7JYfF#lBDcaIqDl^M2smf~UT_k%XmtI4*YM_!T*u zWgzIu+;p)ij*quCHQkw3CK%^pzw560VSTP&_BGEwJ=zIe_ikGv%H;>WOXC> zrDmIp{Rls-xE0)%xy*u1gk0?M=9}6*ALELIN|pLnb-%6BF10S6xY*I8)OfrvS*VIH z-Q#evcjM6#CWk=SWN??J*#7+auL@ns_ALx;36P6j zvf<$s#QvZ_cG02Akil`p#a@DpEpDS+=js8z(;kkCje=5M0T3u&kQ9MyiEQ(td5yW@ z^U<6lq-=Asn`La_){(&chq0Ovx!B;fW+jak4}`JpzUG*dmO}Nji61WZQ3C&Y`pD98 zdxc;bvWMeh^Dly+Ab`=W&V=~*R+&3GpPYU0+ewlbwz=4DXp_3zzSNTH>F2G2CO$5< z#bPgR2C5NiQM{#R+U4Kd&~ocLhsF;Vdo19;txM0n2Jz5x568tevj!kbONdMgy2_hs zAfk9=&Ssm7{egMj9`e84JpZ|4{u`K#Bt9;-qEPc?1c>qTLV7Iduo0cnASZsf*tZh# z-|Rf~wTV0X0@^9-x!AjZ4KCYU?Eia&$AX3>ABa~HAs5@0u@#urI41H=%L-Mxg5)YnvpohAX4^;bMnIv)M80*E_UFWQLqV-i;WX9C3qSrP~wVPYl{F;j$iB7xvju31D=3?vR7H;~fJe!LhEjT4Yj2EYX zs3u}Aw&86NdL}KlJeiA~O&h|Vu7l)PlJmsJ)WfXNR81`P}LjO5#fphgkrRe za1>L z=1rOF@;-RB89}v#kM46VZj+59)Ef8o(2~iAoFV~ z%UXlH&+|T87ahR3B;U>`7duio!!@+x=3=ADlSTU~z*X+> zJq<3lt&cz?qvUXIofULBY(~e`I8DYd$;I}N)~!q3CBqI>22Ou_yUXj=w_s09yj*Nt zDRghR;r`wgD=5vT?`d$cwbX729a=7k?CNKZE~jOSo~8??4iG+z0;62)q;t;YyEleX z>(+c%tuGP@vl1^CJ3#j{BI$aoY})hSV(U}_MuG!!Xgyyq3bnhQbVHHa)6X`VNiMeN zTTrG=_Ao{H1kA<8i4;W5!vgj+xY$~HTqvIRb*VS^nf6=8hX<&pZE9!oGqh%si*4%V z8?mNDYdFM`fVtSh$O=iL_Vzru*jm1x6S>j$U)x?g<3n+Jcxm_Ctsg{El}c8**b*4G zja5W~CSESK$d4IhT5NRIY-esMpYmvmV$)h)ZS9}=)f0KK$?n}E644x*-0t(QFyE_Q-5n{7Eg-fsThZkPF8w{PUnx;Sz9#H}U`fbitIS~JPT z7FfnO148>cBq4LLoq}?tvZuktPFkgPalLu_pjr8H_c)e&7Z+2PTdtJRY;v(t6_6OZ zRSwFZxY%ap2Wlcq{@&IBKA7|tX z3qr5uQCw^>o0ynko>%U<*b#z_wZ)pU>-Jc}!^|UlHaAkHmmr1xXKKwP7rWpEu^QCv z2%=CP#l<#%?NqA}Vq2DLE_R}5g&snb+_LIc-NOj<#~HnuX48w78%oe5^}1z~i;ZRK zul;hDpzgc(jPx&$;$j2gO`mMyryah+S|ls}UqBo}Te!Z(tAmUzSW{NrAQqj~~Iqe-0l-(?08Uz4|5y6qX%ks7ubSqOt6oX}xiyc_*a;-xI(Zf^gh&&Nd`4bo0 z9c5O|mejXJ@XwWdGq~D)+oi~hrz`lDWw)cF%FCK!ybVIx481?X}Rv8bsr_-=~w>5#V)nVUFX%GcKUq{v*?~A zz*ZZ0*Pt17uTgf-KA0nlP(6vrprDFklb`$}(Mu~eCb`&n=v><6yK`CPQm)8tT?Ax* zz49n7Hmu5bZos`e)MnA5z|i@5?y0>|WOw8~3LvReI>e5GDnB#qDkrpL>hs$mYEqgxR=L<7 
zm>xf%Aj_w?*f>EW6@npIvCz*d_JBB-=UxtK2LXToGdL-)yJ2NA&ga-TlOPB)fr5VFk0)>mgMI-dx+*oH%!7eo$dPdst4F)6!{IV-r>avXW6dG1y6+@xR+ z)K+-=LAol{n^LpAt*@RNR^Bn8S3?GSwqcr!EkVQOv}}C>EEh+J&KHyzWWS?}1lyD@1@PP3Lq5e|tXE;fRqGa5V?0nvqhvGsoB z^Pe{K=9XQzp;M-P6z*;19>Em98jWVoQo|D-?d-GFSYvmq>7@6kc&NU z=BzF=GFM3?)>eBDxM{%;+j_eo?}#HV_A)@Zc3iQM9oB{aj;!8~{NVv#SatWXvp5FH zaHBQG8SE&y*ZlfrAygs^1qw`av2pYegb#GxK{Ww#v7-$qK$LSbUUd1Nu_%eoD{937 zsoC+v#Xg7-*6w_8@V({`TlI(Dk9=u|HWvJqyW={Y$ePU`eY{}PeD}T}I0z96&NLT0 zz~Y5vsbf(j0dlb&qoB8K1i4Uc=%0BI8-5FSe^~ilqxj)szod9hJSltTLfk!Gy#CPp zk$<*v6H$1d-Eoa&dHE8V4U0`(XsZc`8pROt#4ydp)@Kn5#~o#rM99S!O8AjQEdwKW zd@py7UfS&Lak>1!V~8Ivc0++-1&^}&+X`0<>JPmidFo(?2%XAm@`Sx^r9bqz73e9} zeNng}h%e2rnOVa$7uzkw!?6XG4KY3ea5} zhl{Fc3E_Q=ge#3$T%_3}}$ZlSGm*dbD@5Me7XT}p3TlVi}g~7(ra{CAZvju?g(- zW_Ptax!`tln9OnmWZyyLhzL#~@M!6rWq+-vI{8N!bm>FVV~J*(i=B79-8Yj;xs)3% zpjReBF7~<3IhS%xfn0aIC{{0<>)y3ZHn3@9{BW_8SP-Srw>3hL64t0+Wh)$J0-h9+dRQ8+v9MRpf0-AL^93AR$~A*urJ0YJ}x$>H(dlF zwLC_l+g5_%^El#SBVbW;_z|-PYB$z%s@Y2e32`pVot<2&^U}QAG~G|nk2$g3Zu2K^ zUs@p{!?}4YKrGW-Y!vgE3M7%l$Hk7saIlDmW>h??SX7f()W;7OTZ}+?bA@G17I3kh zB#Nj)X=5$RouQ|=D0j~Kn=W?uY&Ord#h^R#SO{d#j3|g@nv1Q0@rfX4BIIJD65K6# zvy3eWRs-EAZn)Sn+OU9FqSA8D#V%?*s*xh)Se9Gi%)$aW+Lm`q)RM=?`3fg0)vpZ8 zTBbco+k<$O!2PSuvw(|j!3=4kuqV&W`H7=Aq8P3w zOgyf8%OjJzEKwi~(_Cx?HPi&AizPlTwna4PP%vF0W(-t`;)#nLnjaKYCAg!AnnSRd z9kfT5TdpsD=m#D@d+539rI(aP@gQO2Q6x1?bFr0(Vu|uCiI0oTu8+_L3@3!*0!0*! zCoVSPdtA8!y!O}WBDCM2WVz+y;)lxBa0Dg%Dxt}g%TylU>ChCA#OOzkxk-~?kng-- z@!r|9O7ZGUo7P5&EEFRx