From 45b3420a57c4660e8ad0058ec77533c34fee7b35 Mon Sep 17 00:00:00 2001 From: danielclow <106956386+danielclow@users.noreply.github.com> Date: Mon, 22 Aug 2022 06:43:11 +0800 Subject: [PATCH] doc: english version of tdengine sql --- docs/en/12-taos-sql/01-data-type.md | 53 +- docs/en/12-taos-sql/02-database.md | 198 ++--- docs/en/12-taos-sql/03-table.md | 197 +++-- docs/en/12-taos-sql/04-stable.md | 139 ++-- docs/en/12-taos-sql/05-insert.md | 117 ++- docs/en/12-taos-sql/06-select.md | 464 ++++++------ docs/en/12-taos-sql/08-delete-data.mdx | 17 +- docs/en/12-taos-sql/10-function.md | 918 ++++++++++++------------ docs/en/12-taos-sql/12-distinguished.md | 151 ++-- docs/en/12-taos-sql/13-tmq.md | 47 +- docs/en/12-taos-sql/14-stream.md | 103 ++- docs/en/12-taos-sql/16-operators.md | 82 +-- docs/en/12-taos-sql/17-json.md | 57 +- docs/en/12-taos-sql/18-escape.md | 2 +- docs/en/12-taos-sql/19-limit.md | 76 +- docs/en/12-taos-sql/20-keywords.md | 289 ++++---- docs/en/12-taos-sql/21-node.md | 64 +- docs/en/12-taos-sql/22-meta.md | 324 ++++----- docs/en/12-taos-sql/23-perf.md | 129 ++++ docs/en/12-taos-sql/24-show.md | 76 +- docs/en/12-taos-sql/25-grant.md | 40 +- docs/en/12-taos-sql/26-udf.md | 70 +- docs/en/12-taos-sql/27-index.md | 28 +- docs/en/12-taos-sql/28-recovery.md | 22 +- docs/en/12-taos-sql/29-changes.md | 95 +++ docs/en/12-taos-sql/index.md | 16 +- 26 files changed, 2052 insertions(+), 1722 deletions(-) create mode 100644 docs/en/12-taos-sql/23-perf.md create mode 100644 docs/en/12-taos-sql/29-changes.md diff --git a/docs/en/12-taos-sql/01-data-type.md b/docs/en/12-taos-sql/01-data-type.md index d038219c8a..b830994ac9 100644 --- a/docs/en/12-taos-sql/01-data-type.md +++ b/docs/en/12-taos-sql/01-data-type.md @@ -1,9 +1,10 @@ --- +sidebar_label: Data Types title: Data Types description: "TDengine supports a variety of data types including timestamp, float, JSON and many others." 
--- -## TIMESTAMP +## Timestamp When using TDengine to store and query data, the most important part of the data is timestamp. Timestamp must be specified when creating and inserting data rows. Timestamp must follow the rules below: @@ -18,52 +19,54 @@ Time precision in TDengine can be set by the `PRECISION` parameter when executin ```sql CREATE DATABASE db_name PRECISION 'ns'; ``` - ## Data Types In TDengine, the data types below can be used when specifying a column or tag. | # | **type** | **Bytes** | **Description** | -| --- | :-------: | --------- | ------------------------- | -| 1 | TIMESTAMP | 8 | Default precision is millisecond, microsecond and nanosecond are also supported | +| --- | :-------: | --------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 1 | TIMESTAMP | 8 | Default precision is millisecond, microsecond and nanosecond are also supported | | 2 | INT | 4 | Integer, the value range is [-2^31, 2^31-1] | -| 3 |INT UNSIGNED|4 | Unsigned integer, the value range is [0, 2^31-1] | +| 3 | INT UNSIGNED| 4| unsigned integer, the value range is [0, 2^32-1] | 4 | BIGINT | 8 | Long integer, the value range is [-2^63, 2^63-1] | -| 5 | BIGINT UNSIGNED | 8 | Unsigned long integer, the value range is [0, 2^63-1] | +| 5 | BIGINT UNSIGNED | 8 | unsigned long integer, the value range is [0, 2^64-1] | | 6 | FLOAT | 4 | Floating point number, the effective number of digits is 6-7, the value range is [-3.4E38, 3.4E38] | | 7 | DOUBLE | 8 | Double precision floating point number, the effective number of digits is 15-16, the value range is [-1.7E308, 1.7E308] | -| 8 | BINARY | User Defined | Single-byte string for 
ASCII visible characters. Length must be specified when defining a column or tag of binary type. The string length can be up to 16374 bytes. The string value must be quoted with single quotes. The literal single quote inside the string must be preceded with back slash like `\'` | -| 9 | SMALLINT | 2 | Short integer, the value range is [-32768, 32767] | -| 10 | SMALLINT UNSIGNED | 2 | Unsigned short integer, the value range is [0, 32767] | -| 11 | TINYINT | 1 | Single-byte integer, the value range is [-128, 127] | -| 12 | TINYINT UNSIGNED | 1 | Unsigned single-byte integer, the value range is [0, 127] | -| 13 | BOOL | 1 | Bool, the value range is {true, false} | -| 14 | NCHAR | User Defined| Multi-Byte string that can include multi byte characters like Chinese characters. Each character of NCHAR type consumes 4 bytes storage. The string value should be quoted with single quotes. Literal single quote inside the string must be preceded with backslash, like `\’`. The length must be specified when defining a column or tag of NCHAR type, for example nchar(10) means it can store at most 10 characters of nchar type and will consume fixed storage of 40 bytes. An error will be reported if the string value exceeds the length defined. | +| 8 | BINARY | User Defined | Single-byte string for ASCII visible characters. Length must be specified when defining a column or tag of binary type. | +| 9 | SMALLINT | 2 | Short integer, the value range is [-32768, 32767] | +| 10 | SMALLINT UNSIGNED | 2 | unsigned short integer, the value range is [0, 65535] | +| 11 | TINYINT | 1 | Single-byte integer, the value range is [-128, 127] | +| 12 | TINYINT UNSIGNED | 1 | unsigned single-byte integer, the value range is [0, 255] | +| 13 | BOOL | 1 | Bool, the value range is {true, false} | +| 14 | NCHAR | User Defined| Multi-Byte string that can include multi byte characters like Chinese characters. Each character of NCHAR type consumes 4 bytes storage. The string value should be quoted with single quotes. 
Literal single quote inside the string must be preceded with backslash, like `\’`. The length must be specified when defining a column or tag of NCHAR type, for example nchar(10) means it can store at most 10 characters of nchar type and will consume fixed storage of 40 bytes. An error will be reported if the string value exceeds the length defined. | | 15 | JSON | | JSON type can only be used on tags. A tag of json type is excluded with any other tags of any other type | -| 16 | VARCHAR | User Defined| Alias of BINARY type | +| 16 | VARCHAR | User-defined | Alias of BINARY | + :::note - TDengine is case insensitive and treats any characters in the sql command as lower case by default, case sensitive strings must be quoted with single quotes. -- Only ASCII visible characters are suggested to be used in a column or tag of BINARY type. Multi-byte characters must be stored in NCHAR type. +- Only ASCII visible characters are suggested to be used in a column or tag of BINARY type. Multi-byte characters must be stored in NCHAR type. +- The length of BINARY can be up to 16374 bytes. The string value must be quoted with single quotes. You must specify a length in bytes for a BINARY value, for example binary(20) for up to twenty single-byte characters. If the data exceeds the specified length, an error will occur. The literal single quote inside the string must be preceded with back slash like `\'` - Numeric values in SQL statements will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used, so attention must be paid to avoid overflow. For example, 9999999999999999999 will be considered as overflow because it exceeds the upper limit of long integer, but 9999999999999999999.0 will be considered as a legal float number. ::: + ## Constants -TDengine supports constants of multiple data type. 
+TDengine supports a variety of constants: | # | **Syntax** | **Type** | **Description** | | --- | :-------: | --------- | -------------------------------------- | -| 1 | [{+ \| -}]123 | BIGINT | Numeric constants are treated as BIGINT type. The value will be truncated if it exceeds the range of BIGINT type. | -| 2 | 123.45 | DOUBLE | Floating number constants are treated as DOUBLE type. TDengine determines whether it's a floating number based on if decimal point or scientific notation is used. | -| 3 | 1.2E3 | DOUBLE | Constants in scientific notation are treated ad DOUBLE type. | -| 4 | 'abc' | BINARY | String constants enclosed by single quotes are treated as BINARY type. Its size is determined as the acutal length. Single quote itself can be included by preceding backslash, i.e. `\'`, in a string constant. | -| 5 | "abc" | BINARY | String constants enclosed by double quotes are treated as BINARY type. Its size is determined as the acutal length. Double quote itself can be included by preceding backslash, i.e. `\"`, in a string constant. | -| 6 | TIMESTAMP {'literal' \| "literal"} | TIMESTAMP | A string constant following `TIMESTAMP` keyword is treated as TIMESTAMP type. The string should be in the format of "YYYY-MM-DD HH:mm:ss.MS". Its time precision is same as that of the current database being used. | -| 7 | {TRUE \| FALSE} | BOOL | BOOL type contant. | -| 8 | {'' \| "" \| '\t' \| "\t" \| ' ' \| " " \| NULL } | -- | NULL constant, it can be used for any type.| +| 1 | [{+ \| -}]123 | BIGINT | Integer literals are of type BIGINT. Data that exceeds the length of the BIGINT type is truncated. | +| 2 | 123.45 | DOUBLE | Floating-point literals are of type DOUBLE. Numeric values will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used. | +| 3 | 1.2E3 | DOUBLE | Literals in scientific notation are of type DOUBLE. 
| +| 4 | 'abc' | BINARY | Content enclosed in single quotation marks is of type BINARY. The size of a BINARY is the size of the string in bytes. A literal single quote inside the string must be escaped with a backslash (\'). | +| 5 | "abc" | BINARY | Content enclosed in double quotation marks is of type BINARY. The size of a BINARY is the size of the string in bytes. A literal double quote inside the string must be escaped with a backslash (\"). | +| 6 | TIMESTAMP {'literal' \| "literal"} | TIMESTAMP | The TIMESTAMP keyword indicates that the following string literal is interpreted as a timestamp. The string must be in YYYY-MM-DD HH:mm:ss.MS format. The precision is inherited from the database configuration. | +| 7 | {TRUE \| FALSE} | BOOL | Boolean literals are of type BOOL. | +| 8 | {'' \| "" \| '\t' \| "\t" \| ' ' \| " " \| NULL } | -- | The preceding characters indicate null literals. These can be used with any data type. | :::note -- TDengine determines whether it's a floating number based on if decimal point or scientific notation is used. So whether the value is determined as overflow depends on both the value and the determined type. For example, 9999999999999999999 is determined as overflow because it exceeds the upper limit of BIGINT type, while 9999999999999999999.0 is considered as a valid floating number because it is within the range of DOUBLE type. +Numeric values will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used, so attention must be paid to avoid overflow. For example, 9999999999999999999 will be considered as overflow because it exceeds the upper limit of long integer, but 9999999999999999999.0 will be considered as a legal float number. 
::: diff --git a/docs/en/12-taos-sql/02-database.md b/docs/en/12-taos-sql/02-database.md index c2961d6241..d9dadae976 100644 --- a/docs/en/12-taos-sql/02-database.md +++ b/docs/en/12-taos-sql/02-database.md @@ -4,123 +4,153 @@ title: Database description: "create and drop database, show or change database parameters" --- -## Create Database +## Create a Database + +```sql +CREATE DATABASE [IF NOT EXISTS] db_name [database_options] + +database_options: + database_option ... + +database_option: { + BUFFER value + | CACHEMODEL {'none' | 'last_row' | 'last_value' | 'both'} + | CACHESIZE value + | COMP {0 | 1 | 2} + | DURATION value + | WAL_FSYNC_PERIOD value + | MAXROWS value + | MINROWS value + | KEEP value + | PAGES value + | PAGESIZE value + | PRECISION {'ms' | 'us' | 'ns'} + | REPLICA value + | RETENTIONS ingestion_duration:keep_duration ... + | STRICT {'off' | 'on'} + | WAL_LEVEL {1 | 2} + | VGROUPS value + | SINGLE_STABLE {0 | 1} + | WAL_RETENTION_PERIOD value + | WAL_ROLL_PERIOD value + | WAL_RETENTION_SIZE value + | WAL_SEGMENT_SIZE value +} +``` + +## Parameters + +- BUFFER: specifies the size (in MB) of the write buffer for each vnode. Enter a value between 3 and 16384. The default value is 96. +- CACHEMODEL: specifies how the latest data in subtables is stored in the cache. The default value is none. + - none: The latest data is not cached. + - last_row: The last row of each subtable is cached. This option significantly improves the performance of the LAST_ROW function. + - last_value: The last non-null value of each column in each subtable is cached. This option significantly improves the performance of the LAST function under normal circumstances, such as statements including the WHERE, ORDER BY, GROUP BY, and INTERVAL keywords. + - both: The last row of each subtable and the last non-null value of each column in each subtable are cached. +- CACHESIZE: specifies the amount (in MB) of memory used for subtable caching on each vnode. 
Enter a value between 1 and 65536. The default value is 1. +- COMP: specifies how databases are compressed. The default value is 2. + - 0: Compression is disabled. + - 1: One-pass compression is enabled. + - 2: Two-pass compression is enabled. +- DURATION: specifies the time period contained in each data file. After the time specified by this parameter has elapsed, TDengine creates a new data file to store incoming data. You can use m (minutes), h (hours), and d (days) as the unit, for example DURATION 100h or DURATION 10d. If you do not include a unit, d is used by default. +- WAL_FSYNC_PERIOD: specifies the interval (in milliseconds) at which data is written from the WAL to disk. This parameter takes effect only when the WAL parameter is set to 2. The default value is 3000. Enter a value between 0 and 180000. The value 0 indicates that incoming data is immediately written to disk. +- MAXROWS: specifies the maximum number of rows recorded in a block. The default value is 4096. +- MINROWS: specifies the minimum number of rows recorded in a block. The default value is 100. +- KEEP: specifies the time for which data is retained. Enter a value between 1 and 365000. The default value is 3650. The value of the KEEP parameter must be greater than or equal to the value of the DURATION parameter. TDengine automatically deletes data that is older than the value of the KEEP parameter. You can use m (minutes), h (hours), and d (days) as the unit, for example KEEP 100h or KEEP 10d. If you do not include a unit, d is used by default. +- PAGES: specifies the number of pages in the metadata storage engine cache on each vnode. Enter a value greater than or equal to 64. The default value is 256. The space occupied by metadata storage on each vnode is equal to the product of the values of the PAGESIZE and PAGES parameters. The space occupied by default is 1 MB. +- PAGESIZE: specifies the size (in KB) of each page in the metadata storage engine cache on each vnode. 
The default value is 4. Enter a value between 1 and 16384. +- PRECISION: specifies the precision at which a database records timestamps. Enter ms for milliseconds, us for microseconds, or ns for nanoseconds. The default value is ms. +- REPLICA: specifies the number of replicas that are made of the database. Enter 1 or 3. The default value is 1. The value of the REPLICA parameter cannot exceed the number of dnodes in the cluster. +- RETENTIONS: specifies the retention period for data aggregated at various intervals. For example, RETENTIONS 15s:7d,1m:21d,15m:50d indicates that data aggregated every 15 seconds is retained for 7 days, data aggregated every 1 minute is retained for 21 days, and data aggregated every 15 minutes is retained for 50 days. You must enter three aggregation intervals and corresponding retention periods. +- STRICT: specifies whether strong data consistency is enabled. The default value is off. + - on: Strong consistency is enabled and implemented through the Raft consensus algorithm. In this mode, an operation is considered successful once it is confirmed by half of the nodes in the cluster. + - off: Strong consistency is disabled. In this mode, an operation is considered successful when it is initiated by the local node. +- WAL_LEVEL: specifies whether fsync is enabled. The default value is 1. + - 1: WAL is enabled but fsync is disabled. + - 2: WAL and fsync are both enabled. +- VGROUPS: specifies the initial number of vgroups when a database is created. +- SINGLE_STABLE: specifies whether the database can contain more than one supertable. + - 0: The database can contain multiple supertables. + - 1: The database can contain only one supertable. +- WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. This parameter is used for data subscription. Enter a time in seconds. The default value is 0. A value of 0 indicates that each WAL file is deleted immediately after its contents are written to disk. 
-1: WAL files are never deleted. +- WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription. Enter a size in KB. The default value is 0. A value of 0 indicates that each WAL file is deleted immediately after its contents are written to disk. -1: WAL files are never deleted. +- WAL_ROLL_PERIOD: specifies the time after which WAL files are rotated. After this period elapses, a new WAL file is created. The default value is 0. A value of 0 indicates that a new WAL file is created only after the previous WAL file was written to disk. +- WAL_SEGMENT_SIZE: specifies the maximum size of a WAL file. After the current WAL file reaches this size, a new WAL file is created. The default value is 0. A value of 0 indicates that a new WAL file is created only after the previous WAL file was written to disk. + +### Example Statement + +```sql +create database if not exists db vgroups 10 buffer 10 + +``` + +The preceding SQL statement creates a database named db that has 10 vgroups and whose vnodes have a 10 MB cache. + +### Specify the Database in Use ``` -CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [DAYS days] [UPDATE 1]; +USE db_name; ``` -:::info - -1. KEEP specifies the number of days for which the data in the database will be retained. The default value is 3650 days, i.e. 10 years. The data will be deleted automatically once its age exceeds this threshold. -2. UPDATE specifies whether the data can be updated and how the data can be updated. - 1. UPDATE set to 0 means update operation is not allowed. The update for data with an existing timestamp will be discarded silently and the original record in the database will be preserved as is. - 2. UPDATE set to 1 means the whole row will be updated. The columns for which no value is specified will be set to NULL. - 3. UPDATE set to 2 means updating a subset of columns for a row is allowed. The columns for which no value is specified will be kept unchanged. -3. 
The maximum length of database name is 33 bytes. -4. The maximum length of a SQL statement is 65,480 bytes. -5. Below are the parameters that can be used when creating a database - - cache: [Description](/reference/config/#cache) - - blocks: [Description](/reference/config/#blocks) - - days: [Description](/reference/config/#days) - - keep: [Description](/reference/config/#keep) - - minRows: [Description](/reference/config/#minrows) - - maxRows: [Description](/reference/config/#maxrows) - - wal: [Description](/reference/config/#wallevel) - - fsync: [Description](/reference/config/#fsync) - - update: [Description](/reference/config/#update) - - cacheLast: [Description](/reference/config/#cachelast) - - replica: [Description](/reference/config/#replica) - - quorum: [Description](/reference/config/#quorum) - - comp: [Description](/reference/config/#comp) - - precision: [Description](/reference/config/#precision) -6. Please note that all of the parameters mentioned in this section are configured in configuration file `taos.cfg` on the TDengine server. If not specified in the `create database` statement, the values from taos.cfg are used by default. To override default parameters, they must be specified in the `create database` statement. - -::: +The preceding SQL statement switches to the specified database. (If you connect to TDengine over the REST API, this statement does not take effect.) -## Show Current Configuration +## Drop a Database ``` -SHOW VARIABLES; +DROP DATABASE [IF EXISTS] db_name ``` -## Specify The Database In Use +The preceding SQL statement deletes the specified database. This statement will delete all tables in the database and destroy all vgroups associated with it. Exercise caution when using this statement. -``` -USE db_name; -``` - -:::note -This way is not applicable when using a REST connection. In a REST connection the database name must be specified before a table or stable name. For e.g. 
to query the stable "meters" in database "test" the query would be "SELECT count(*) from test.meters" +## Change Database Configuration -::: +```sql +ALTER DATABASE db_name [alter_database_options] -## Drop Database +alter_database_options: + alter_database_option ... -``` -DROP DATABASE [IF EXISTS] db_name; +alter_database_option: { + CACHEMODEL {'none' | 'last_row' | 'last_value' | 'both'} + | CACHESIZE value + | WAL_LEVEL value + | WAL_FSYNC_PERIOD value + | KEEP value +} ``` :::note -All data in the database will be deleted too. This command must be used with extreme caution. Please follow your organization's data integrity, data backup, data security or any other applicable SOPs before using this command. +Other parameters cannot be modified after the database has been created. ::: -## Change Database Configuration +## View Databases -Some examples are shown below to demonstrate how to change the configuration of a database. Please note that some configuration parameters can be changed after the database is created, but some cannot. For details of the configuration parameters of database please refer to [Configuration Parameters](/reference/config/). +### View All Databases ``` -ALTER DATABASE db_name COMP 2; -``` - -COMP parameter specifies whether the data is compressed and how the data is compressed. - -``` -ALTER DATABASE db_name REPLICA 2; -``` - -REPLICA parameter specifies the number of replicas of the database. - -``` -ALTER DATABASE db_name KEEP 365; +SHOW DATABASES; ``` -KEEP parameter specifies the number of days for which the data will be kept. +### View the CREATE Statement for a Database ``` -ALTER DATABASE db_name QUORUM 2; +SHOW CREATE DATABASE db_name; ``` -QUORUM parameter specifies the necessary number of confirmations to determine whether the data is written successfully. +The preceding SQL statement can be used in migration scenarios. 
This command can be used to get the CREATE statement, which can be used in another TDengine instance to create the exact same database. -``` -ALTER DATABASE db_name BLOCKS 100; -``` +### View Database Configuration -BLOCKS parameter specifies the number of memory blocks used by each VNODE. - -``` -ALTER DATABASE db_name CACHELAST 0; +```sql +SHOW DATABASES \G; ``` -CACHELAST parameter specifies whether and how the latest data of a sub table is cached. +The preceding SQL statement shows the value of each parameter for the specified database. One value is displayed per line. -:::tip -The above parameters can be changed using `ALTER DATABASE` command without restarting. For more details of all configuration parameters please refer to [Configuration Parameters](/reference/config/). - -::: +## Delete Expired Data -## Show All Databases - -``` -SHOW DATABASES; -``` - -## Show The Create Statement of A Database - -``` -SHOW CREATE DATABASE db_name; +```sql +TRIM DATABASE db_name; ``` -This command is useful when migrating the data from one TDengine cluster to another. This command can be used to get the CREATE statement, which can be used in another TDengine instance to create the exact same database. +The preceding SQL statement deletes data that has expired and orders the remaining data in accordance with the storage configuration. diff --git a/docs/en/12-taos-sql/03-table.md b/docs/en/12-taos-sql/03-table.md index f065a8e239..bf32cf171b 100644 --- a/docs/en/12-taos-sql/03-table.md +++ b/docs/en/12-taos-sql/03-table.md @@ -1,127 +1,198 @@ --- -sidebar_label: Table title: Table -description: create super table, normal table and sub table, drop tables and change tables --- ## Create Table -``` -CREATE TABLE [IF NOT EXISTS] tb_name (timestamp_field_name TIMESTAMP, field1_name data_type1 [, field2_name data_type2 ...]); -``` - -:::info +You create standard tables and subtables with the `CREATE TABLE` statement. 
+ +```sql +CREATE TABLE [IF NOT EXISTS] [db_name.]tb_name (create_definition [, create_definition] ...) [table_options] + +CREATE TABLE create_subtable_clause + +CREATE TABLE [IF NOT EXISTS] [db_name.]tb_name (create_definition [, create_definition] ...) + [TAGS (create_definition [, create_definition] ...)] + [table_options] + +create_subtable_clause: { + create_subtable_clause [create_subtable_clause] ... + | [IF NOT EXISTS] [db_name.]tb_name USING [db_name.]stb_name [(tag_name [, tag_name] ...)] TAGS (tag_value [, tag_value] ...) +} + +create_definition: + col_name column_definition + +column_definition: + type_name [comment 'string_value'] + +table_options: + table_option ... + +table_option: { + COMMENT 'string_value' + | WATERMARK duration[,duration] + | MAX_DELAY duration[,duration] + | ROLLUP(func_name [, func_name] ...) + | SMA(col_name [, col_name] ...) + | TTL value +} + +``` + +**More explanations** 1. The first column of a table MUST be of type TIMESTAMP. It is automatically set as the primary key. 2. The maximum length of the table name is 192 bytes. 3. The maximum length of each row is 48k bytes, please note that the extra 2 bytes used by each BINARY/NCHAR column are also counted. 4. The name of the subtable can only consist of characters from the English alphabet, digits and underscore. Table names can't start with a digit. Table names are case insensitive. 5. The maximum length in bytes must be specified when using BINARY or NCHAR types. -6. Escape character "\`" can be used to avoid the conflict between table names and reserved keywords, above rules will be bypassed when using escape character on table names, but the upper limit for the name length is still valid. The table names specified using escape character are case sensitive. Only ASCII visible characters can be used with escape character. +6. 
Escape character "\`" can be used to avoid the conflict between table names and reserved keywords, above rules will be bypassed when using escape character on table names, but the upper limit for the name length is still valid. The table names specified using escape character are case sensitive. For example \`aBc\` and \`abc\` are different table names but `abc` and `aBc` are same table names because they are both converted to `abc` internally. + Only ASCII visible characters can be used with escape character. -::: +**Parameter description** +1. COMMENT: specifies comments for the table. This parameter can be used with supertables, standard tables, and subtables. +2. WATERMARK: specifies the time after which the window is closed. The default value is 5 seconds. Enter a value between 0 and 15 minutes in milliseconds, seconds, or minutes. You can enter multiple values separated by commas (,). This parameter applies only to supertables and takes effect only when the RETENTIONS parameter has been specified for the database. +3. MAX_DELAY: specifies the maximum latency for pushing computation results. The default value is 15 minutes or the value of the INTERVAL parameter, whichever is smaller. Enter a value between 0 and 15 minutes in milliseconds, seconds, or minutes. You can enter multiple values separated by commas (,). Note: Retain the default value if possible. Configuring a small MAX_DELAY may cause results to be frequently pushed, affecting storage and query performance. This parameter applies only to supertables and takes effect only when the RETENTIONS parameter has been specified for the database. +4. ROLLUP: specifies aggregate functions to roll up. Rolling up a function provides downsampled results based on multiple axes. This parameter applies only to supertables and takes effect only when the RETENTIONS parameter has been specified for the database. You can specify only one function to roll up. The rollup takes effect on all columns except TS. 
Enter one of the following values: avg, sum, min, max, last, or first. +5. SMA: specifies functions on which to enable small materialized aggregates (SMA). SMA is user-defined precomputation of aggregates based on data blocks. Enter one of the following values: max, min, or sum This parameter can be used with supertables and standard tables. +6. TTL: specifies the time to live (TTL) for the table. If the period specified by the TTL parameter elapses without any data being written to the table, TDengine will automatically delete the table. Note: The system may not delete the table at the exact moment that the TTL expires. Enter a value in days. The default value is 0. Note: The TTL parameter has a higher priority than the KEEP parameter. If a table is marked for deletion because the TTL has expired, it will be deleted even if the time specified by the KEEP parameter has not elapsed. This parameter can be used with standard tables and subtables. -### Create Subtable Using STable As Template +## Create Subtables -``` +### Create a Subtable + +```sql CREATE TABLE [IF NOT EXISTS] tb_name USING stb_name TAGS (tag_value1, ...); ``` -The above command creates a subtable using the specified super table as a template and the specified tag values. +### Create a Subtable with Specified Tags -### Create Subtable Using STable As Template With A Subset of Tags - -``` +```sql CREATE TABLE [IF NOT EXISTS] tb_name USING stb_name (tag_name1, ...) TAGS (tag_value1, ...); ``` -The tags for which no value is specified will be set to NULL. +The preceding SQL statement creates a subtable based on a supertable but specifies a subset of tags to use. Tags that are not included in this subset are assigned a null value. -### Create Tables in Batch +### Create Multiple Subtables -``` +```sql CREATE TABLE [IF NOT EXISTS] tb_name1 USING stb_name TAGS (tag_value1, ...) [IF NOT EXISTS] tb_name2 USING stb_name TAGS (tag_value2, ...) 
...; ``` -This can be used to create a lot of tables in a single SQL statement while making table creation much faster. +You can create multiple subtables in a single SQL statement provided that all subtables use the same supertable. For performance reasons, do not create more than 3000 tables per statement. + +## Modify a Table -:::info +```sql +ALTER TABLE [db_name.]tb_name alter_table_clause + +alter_table_clause: { + alter_table_options + | ADD COLUMN col_name column_type + | DROP COLUMN col_name + | MODIFY COLUMN col_name column_type + | RENAME COLUMN old_col_name new_col_name +} + +alter_table_options: + alter_table_option ... + +alter_table_option: { + TTL value + | COMMENT 'string_value' +} -- Creating tables in batch must use a super table as a template. -- The length of single statement is suggested to be between 1,000 and 3,000 bytes for best performance. +``` -::: +**More explanations** +You can perform the following modifications on existing tables: +1. ADD COLUMN: adds a column to the supertable. +2. DROP COLUMN: deletes a column from the supertable. +3. MODIFY COLUMN: changes the length of the data type specified for the column. Note that you can only specify a length greater than the current length. +4. RENAME COLUMN: renames a specified column in the table. -## Drop Tables +### Add a Column -``` -DROP TABLE [IF EXISTS] tb_name; +```sql +ALTER TABLE tb_name ADD COLUMN field_name data_type; ``` -## Show All Tables In Current Database +### Delete a Column -``` -SHOW TABLES [LIKE tb_name_wildcard]; +```sql +ALTER TABLE tb_name DROP COLUMN field_name; ``` -## Show Create Statement of A Table +### Modify the Data Length +```sql +ALTER TABLE tb_name MODIFY COLUMN field_name data_type(length); ``` -SHOW CREATE TABLE tb_name; -``` - -This is useful when migrating the data in one TDengine cluster to another one because it can be used to create the exact same tables in the target database. 
-## Show Table Definition +### Rename a Column +```sql +ALTER TABLE tb_name RENAME COLUMN old_col_name new_col_name ``` -DESCRIBE tb_name; + +## Modify a Subtable + +```sql +ALTER TABLE [db_name.]tb_name alter_table_clause + +alter_table_clause: { + alter_table_options + | SET TAG tag_name = new_tag_value +} + +alter_table_options: + alter_table_option ... + +alter_table_option: { + TTL value + | COMMENT 'string_value' +} ``` -## Change Table Definition +**More explanations** +1. Only the value of a tag can be modified directly. For all other modifications, you must modify the supertable from which the subtable was created. -### Add A Column +### Change Tag Value Of Sub Table ``` -ALTER TABLE tb_name ADD COLUMN field_name data_type; +ALTER TABLE tb_name SET TAG tag_name=new_tag_value; ``` -:::info +## Delete a Table -1. The maximum number of columns is 4096, the minimum number of columns is 2. -2. The maximum length of a column name is 64 bytes. +The following SQL statement deletes one or more tables. -::: - -### Remove A Column - -``` -ALTER TABLE tb_name DROP COLUMN field_name; +```sql +DROP TABLE [IF EXISTS] [db_name.]tb_name [, [IF EXISTS] [db_name.]tb_name] ... ``` -:::note -If a table is created using a super table as template, the table definition can only be changed on the corresponding super table, and the change will be automatically applied to all the subtables created using this super table as template. For tables created in the normal way, the table definition can be changed directly on the table. +## View Tables -::: +### View All Tables -### Change Column Length +The following SQL statement shows all tables in the current database. -``` -ALTER TABLE tb_name MODIFY COLUMN field_name data_type(length); +```sql +SHOW TABLES [LIKE tb_name_wildchar]; ``` -If the type of a column is variable length, like BINARY or NCHAR, this command can be used to change the length of the column. 
+### View the CREATE Statement for a Table -:::note -If a table is created using a super table as template, the table definition can only be changed on the corresponding super table, and the change will be automatically applied to all the subtables created using this super table as template. For tables created in the normal way, the table definition can be changed directly on the table. +``` +SHOW CREATE TABLE tb_name; +``` -::: +This command is useful in migrating data from one TDengine cluster to another because it can be used to create the exact same tables in the target database. -### Change Tag Value Of Sub Table +## View the Table Schema ``` -ALTER TABLE tb_name SET TAG tag_name=new_tag_value; -``` - -This command can be used to change the tag value if the table is created using a super table as template. +DESCRIBE [db_name.]tb_name; +``` \ No newline at end of file diff --git a/docs/en/12-taos-sql/04-stable.md b/docs/en/12-taos-sql/04-stable.md index b8a608792a..6a0a0922cc 100644 --- a/docs/en/12-taos-sql/04-stable.md +++ b/docs/en/12-taos-sql/04-stable.md @@ -1,118 +1,159 @@ --- -sidebar_label: STable -title: Super Table +sidebar_label: Supertable +title: Supertable --- -:::note +## Create a Supertable -Keyword `STable`, abbreviated for super table, is supported since version 2.0.15. +```sql +CREATE STABLE [IF NOT EXISTS] stb_name (create_definition [, create_definition] ...) TAGS (create_definition [, create_definition] ...) [table_options] + +create_definition: + col_name column_definition + +column_definition: + type_name [COMMENT 'string_value'] +``` -::: +**More explanations** +- Each supertable can have a maximum of 4096 columns, including tags. The minimum number of columns is 3: a timestamp column used as the key, one tag column, and one data column. +- When you create a supertable, you can add comments to columns and tags. +- The TAGS keyword defines the tag columns for the supertable. 
The following restrictions apply to tag columns: + - A tag column can use the TIMESTAMP data type, but the values in the column must be fixed numbers. Timestamps including formulae, such as "now + 10s", cannot be stored in a tag column. + - The name of a tag column cannot be the same as the name of any other column. + - The name of a tag column cannot be a reserved keyword. + - Each supertable must contain between 1 and 128 tags. The total length of the TAGS keyword cannot exceed 16 KB. +- For more information about table parameters, see Create a Table. -## Create STable +## View a Supertable + +### View All Supertables ``` -CREATE STable [IF NOT EXISTS] stb_name (timestamp_field_name TIMESTAMP, field1_name data_type1 [, field2_name data_type2 ...]) TAGS (tag1_name tag_type1, tag2_name tag_type2 [, tag3_name tag_type3]); +SHOW STABLES [LIKE tb_name_wildcard]; ``` -The SQL statement of creating a STable is similar to that of creating a table, but a special column set named `TAGS` must be specified with the names and types of the tags. - -:::info +The preceding SQL statement shows all supertables in the current TDengine database, including the name, creation time, number of columns, number of tags, and number of subtables for each supertable. -1. A tag can be of type timestamp, since version 2.1.3.0, but its value must be fixed and arithmetic operations cannot be performed on it. Prior to version 2.1.3.0, tag types specified in TAGS could not be of type timestamp. -2. The tag names specified in TAGS should NOT be the same as other columns. -3. The tag names specified in TAGS should NOT be the same as any reserved keywords.(Please refer to [keywords](/taos-sql/keywords/) -4. The maximum number of tags specified in TAGS is 128, there must be at least one tag, and the total length of all tag columns should NOT exceed 16KB. 
- -::: - -## Drop STable +### View the CREATE Statement for a Supertable ``` -DROP STable [IF EXISTS] stb_name; +SHOW CREATE STABLE stb_name; ``` -All the subtables created using the deleted STable will be deleted automatically. +The preceding SQL statement can be used in migration scenarios. It returns the CREATE statement that was used to create the specified supertable. You can then use the returned statement to create an identical supertable on another TDengine database. -## Show All STables +## View the Supertable Schema ``` -SHOW STableS [LIKE tb_name_wildcard]; +DESCRIBE [db_name.]stb_name; ``` -This command can be used to display the information of all STables in the current database, including name, creation time, number of columns, number of tags, and number of tables created using this STable. - -## Show The Create Statement of A STable +## Drop STable ``` -SHOW CREATE STable stb_name; +DROP STABLE [IF EXISTS] [db_name.]stb_name ``` -This command is useful in migrating data from one TDengine cluster to another because it can be used to create the exact same STable in the target database. +Note: Deleting a supertable will delete all subtables created from the supertable, including all data within those subtables. -## Get STable Definition +## Modify a Supertable + +```sql +ALTER STABLE [db_name.]tb_name alter_table_clause + +alter_table_clause: { + alter_table_options + | ADD COLUMN col_name column_type + | DROP COLUMN col_name + | MODIFY COLUMN col_name column_type + | ADD TAG tag_name tag_type + | DROP TAG tag_name + | MODIFY TAG tag_name tag_type + | RENAME TAG old_tag_name new_tag_name +} + +alter_table_options: + alter_table_option ... + +alter_table_option: { + COMMENT 'string_value' +} ``` -DESCRIBE stb_name; -``` -## Change Columns Of STable +**More explanations** + +Modifications to the table schema of a supertable take effect on all subtables within the supertable. You cannot modify the table schema of subtables individually. 
When you modify the tag schema of a supertable, the modifications automatically take effect on all of its subtables. + +- ADD COLUMN: adds a column to the supertable. +- DROP COLUMN: deletes a column from the supertable. +- MODIFY COLUMN: changes the length of a BINARY or NCHAR column. Note that you can only specify a length greater than the current length. +- ADD TAG: adds a tag to the supertable. +- DROP TAG: deletes a tag from the supertable. When you delete a tag from a supertable, it is automatically deleted from all subtables within the supertable. +- MODIFY TAG: modifies the definition of a tag in the supertable. You can use this keyword to change the length of a BINARY or NCHAR tag column. Note that you can only specify a length greater than the current length. +- RENAME TAG: renames a specified tag in the supertable. When you rename a tag in a supertable, it is automatically renamed in all subtables within the supertable. -### Add A Column +### Add a Column ``` -ALTER STable stb_name ADD COLUMN field_name data_type; +ALTER STABLE stb_name ADD COLUMN col_name column_type; ``` -### Remove A Column +### Delete a Column ``` -ALTER STable stb_name DROP COLUMN field_name; +ALTER STABLE stb_name DROP COLUMN col_name; ``` -### Change Column Length +### Modify the Data Length ``` -ALTER STable stb_name MODIFY COLUMN field_name data_type(length); +ALTER STABLE stb_name MODIFY COLUMN col_name data_type(length); ``` -This command can be used to change (or more specifically, increase) the length of a column of variable length types, like BINARY or NCHAR. - -## Change Tags of A STable +The preceding SQL statement changes the length of a BINARY or NCHAR data column. Note that you can only specify a length greater than the current length. ### Add A Tag ``` -ALTER STable stb_name ADD TAG new_tag_name tag_type; +ALTER STABLE stb_name ADD TAG tag_name tag_type; ``` -This command is used to add a new tag for a STable and specify the tag type. 
+The preceding SQL statement adds a tag of the specified type to the supertable. A supertable cannot contain more than 128 tags. The total length of all tags in a supertable cannot exceed 16 KB. ### Remove A Tag ``` -ALTER STable stb_name DROP TAG tag_name; +ALTER STABLE stb_name DROP TAG tag_name; ``` -The tag will be removed automatically from all the subtables, created using the super table as template, once a tag is removed from a super table. +The preceding SQL statement deletes a tag from the supertable. When you delete a tag from a supertable, it is automatically deleted from all subtables within the supertable. ### Change A Tag ``` -ALTER STable stb_name CHANGE TAG old_tag_name new_tag_name; +ALTER STABLE stb_name RENAME TAG old_tag_name new_tag_name; ``` -The tag name will be changed automatically for all the subtables, created using the super table as template, once a tag name is changed for a super table. +The preceding SQL statement renames a tag in the supertable. When you rename a tag in a supertable, it is automatically renamed in all subtables within the supertable. ### Change Tag Length ``` -ALTER STable stb_name MODIFY TAG tag_name data_type(length); +ALTER STABLE stb_name MODIFY TAG tag_name data_type(length); ``` -This command can be used to change (or more specifically, increase) the length of a tag of variable length types, like BINARY or NCHAR. +The preceding SQL statement changes the length of a BINARY or NCHAR tag column. Note that you can only specify a length greater than the current length. (Available in 2.1.3.0 and later versions) + +### View a Supertable +You can run projection and aggregate SELECT queries on supertables, and you can filter by tag or column by using the WHERE keyword. + +If you do not include an ORDER BY clause, results are returned by subtable. These results are not ordered. You can include an ORDER BY clause in your query to strictly order the results. 
+ + :::note -Changing tag values can be applied to only subtables. All other tag operations, like add tag, remove tag, however, can be applied to only STable. If a new tag is added for a STable, the tag will be added with NULL value for all its subtables. +All tag operations except for updating the value of a tag must be performed on the supertable and not on individual subtables. If you add a tag to an existing supertable, the tag is automatically added with a null value to all subtables within the supertable. ::: diff --git a/docs/en/12-taos-sql/05-insert.md b/docs/en/12-taos-sql/05-insert.md index 1336cd7238..e7d56fb3c7 100644 --- a/docs/en/12-taos-sql/05-insert.md +++ b/docs/en/12-taos-sql/05-insert.md @@ -1,4 +1,5 @@ --- +sidebar_label: Insert title: Insert --- @@ -17,47 +18,62 @@ INSERT INTO ...]; ``` -## Insert Single or Multiple Rows +**Timestamps** -Single row or multiple rows specified with VALUES can be inserted into a specific table. For example: +1. All data writes must include a timestamp. With regard to timestamps, note the following: -A single row is inserted using the below statement. +2. The precision of a timestamp depends on its format. The precision configured for the database affects only timestamps that are inserted as long integers (UNIX time). Timestamps inserted as date and time strings are not affected. As an example, the timestamp 2021-07-13 16:16:48 is equivalent to 1626164208 in UNIX time. This UNIX time is modified to 1626164208000 for databases with millisecond precision, 1626164208000000 for databases with microsecond precision, and 1626164208000000000 for databases with nanosecond precision. -```sq; +3. If you want to insert multiple rows simultaneously, do not use the NOW function in the timestamp. Using the NOW function in this situation will cause multiple rows to have the same timestamp and prevent them from being stored correctly. 
This is because the NOW function obtains the current time on the client, and multiple instances of NOW in a single statement will return the same time. + The earliest timestamp that you can use when inserting data is equal to the current time on the server minus the value of the KEEP parameter. The latest timestamp that you can use when inserting data is equal to the current time on the server plus the value of the DURATION parameter. You can configure the KEEP and DURATION parameters when you create a database. The default values are 3650 days for the KEEP parameter and 10 days for the DURATION parameter. + +**Syntax** + +1. The USING clause automatically creates the specified subtable if it does not exist. If it's unknown whether the table already exists, the table can be created automatically while inserting using the SQL statement below. To use this functionality, a STable must be used as template and tag values must be provided. Any tags that you do not specify will be assigned a null value. + +2. You can insert data into specified columns. Any columns in which you do not insert data will be assigned a null value. + +3. The VALUES clause inserts one or more rows of data into a table. + +4. The FILE clause inserts tags or data from a comma-separated values (CSV) file. Do not include headers in your CSV files. + +5. A single INSERT statement can write data to multiple tables. + +6. The INSERT statement is fully parsed before being executed, so that if any element of the statement fails, the entire statement will fail. For example, the following statement will not create a table because the latter part of the statement is invalid: + + ```sql + INSERT INTO d1001 USING meters TAGS('Beijing.Chaoyang', 2) VALUES('a'); + ``` + +7. However, an INSERT statement that writes data to multiple subtables can succeed for some tables and fail for others. This situation is caused because vnodes perform write operations independently of each other. 
One vnode failing to write data does not affect the ability of other vnodes to write successfully. + +## Insert a Record + +Single row or multiple rows specified with VALUES can be inserted into a specific table. A single row is inserted using the below statement. + +```sql INSERT INTO d1001 VALUES (NOW, 10.2, 219, 0.32); ``` +## Insert Multiple Records + Double rows are inserted using the below statement. ```sql INSERT INTO d1001 VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32) (1626164208000, 10.15, 217, 0.33); ``` -:::note - -1. In the second example above, different formats are used in the two rows to be inserted. In the first row, the timestamp format is a date and time string, which is interpreted from the string value only. In the second row, the timestamp format is a long integer, which will be interpreted based on the database time precision. -2. When trying to insert multiple rows in a single statement, only the timestamp of one row can be set as NOW, otherwise there will be duplicate timestamps among the rows and the result may be out of expectation because NOW will be interpreted as the time when the statement is executed. -3. The oldest timestamp that is allowed is subtracting the KEEP parameter from current time. -4. The newest timestamp that is allowed is adding the DAYS parameter to current time. - -::: - -## Insert Into Specific Columns +## Write to a Specified Column -Data can be inserted into specific columns, either single row or multiple row, while other columns will be inserted as NULL value. +Data can be inserted into specific columns, either single row or multiple row, while other columns will be inserted as NULL value. The key (timestamp) cannot be null. For example: -``` +```sql INSERT INTO d1001 (ts, current, phase) VALUES ('2021-07-13 14:06:33.196', 10.27, 0.31); ``` -:::info -If no columns are explicitly specified, all the columns must be provided with values, this is called "all column mode". 
The insert performance of all column mode is much better than specifying a subset of columns, so it's encouraged to use "all column mode" while providing NULL value explicitly for the columns for which no actual value can be provided. - -::: - ## Insert Into Multiple Tables -One or multiple rows can be inserted into multiple tables in a single SQL statement, with or without specifying specific columns. +One or multiple rows can be inserted into multiple tables in a single SQL statement, with or without specifying specific columns. For example: ```sql INSERT INTO d1001 VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33) @@ -66,19 +82,19 @@ INSERT INTO d1001 VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07- ## Automatically Create Table When Inserting -If it's unknown whether the table already exists, the table can be created automatically while inserting using the SQL statement below. To use this functionality, a STable must be used as template and tag values must be provided. +If it's unknown whether the table already exists, the table can be created automatically while inserting using the SQL statement below. To use this functionality, a STable must be used as template and tag values must be provided. For example: ```sql INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32); ``` -It's not necessary to provide values for all tags when creating tables automatically, the tags without values provided will be set to NULL. +It's not necessary to provide values for all tags when creating tables automatically, the tags without values provided will be set to NULL. For example: ```sql INSERT INTO d21001 USING meters (groupId) TAGS (2) VALUES ('2021-07-13 14:06:33.196', 10.15, 217, 0.33); ``` -Multiple rows can also be inserted into the same table in a single SQL statement. 
+Multiple rows can also be inserted into the same table in a single SQL statement. For example: ```sql INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33) @@ -86,10 +102,6 @@ INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) VALUES ('202 d21003 USING meters (groupId) TAGS (2) (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31); ``` -:::info -Prior to version 2.0.20.5, when using `INSERT` to create tables automatically and specifying the columns, the column names must follow the table name immediately. From version 2.0.20.5, the column names can follow the table name immediately, also can be put between `TAGS` and `VALUES`. In the same SQL statement, however, these two ways of specifying column names can't be mixed. -::: - ## Insert Rows From A File Besides using `VALUES` to insert one or multiple rows, the data to be inserted can also be prepared in a CSV file with comma as separator and each field value quoted by single quotes. Table definition is not required in the CSV file. For example, if file "/tmp/csvfile.csv" contains the below data: @@ -107,58 +119,13 @@ INSERT INTO d1001 FILE '/tmp/csvfile.csv'; ## Create Tables Automatically and Insert Rows From File -From version 2.1.5.0, tables can be automatically created using a super table as template when inserting data from a CSV file, like below: - ```sql INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/csvfile.csv'; ``` -Multiple tables can be automatically created and inserted in a single SQL statement, like below: +When writing data from a file, you can automatically create the specified subtable if it does not exist. 
For example: ```sql INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/csvfile_21001.csv' d21002 USING meters (groupId) TAGS (2) FILE '/tmp/csvfile_21002.csv'; ``` - -## More About Insert - -For SQL statement like `insert`, a stream parsing strategy is applied. That means before an error is found and the execution is aborted, the part prior to the error point has already been executed. Below is an experiment to help understand the behavior. - -First, a super table is created. - -```sql -CREATE TABLE meters(ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS(location BINARY(30), groupId INT); -``` - -It can be proven that the super table has been created by `SHOW STableS`, but no table exists using `SHOW TABLES`. - -``` -taos> SHOW STableS; - name | created_time | columns | tags | tables | -============================================================================================ - meters | 2020-08-06 17:50:27.831 | 4 | 2 | 0 | -Query OK, 1 row(s) in set (0.001029s) - -taos> SHOW TABLES; -Query OK, 0 row(s) in set (0.000946s) -``` - -Then, try to create table d1001 automatically when inserting data into it. - -```sql -INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('a'); -``` - -The output shows the value to be inserted is invalid. But `SHOW TABLES` proves that the table has been created automatically by the `INSERT` statement. - -``` -DB error: invalid SQL: 'a' (invalid timestamp) (0.039494s) - -taos> SHOW TABLES; - table_name | created_time | columns | STable_name | -====================================================================================================== - d1001 | 2020-08-06 17:52:02.097 | 4 | meters | -Query OK, 1 row(s) in set (0.001091s) -``` - -From the above experiment, we can see that while the value to be inserted is invalid the table is still created. 
diff --git a/docs/en/12-taos-sql/06-select.md b/docs/en/12-taos-sql/06-select.md index 8a017cf92e..439205696b 100644 --- a/docs/en/12-taos-sql/06-select.md +++ b/docs/en/12-taos-sql/06-select.md @@ -1,118 +1,124 @@ --- +sidebar_label: Select title: Select --- ## Syntax -```SQL -SELECT select_expr [, select_expr ...] - FROM {tb_name_list} - [WHERE where_condition] - [SESSION(ts_col, tol_val)] - [STATE_WINDOW(col)] - [INTERVAL(interval_val [, interval_offset]) [SLIDING sliding_val]] - [FILL(fill_mod_and_val)] - [GROUP BY col_list] - [ORDER BY col_list { DESC | ASC }] +```sql +SELECT {DATABASE() | CLIENT_VERSION() | SERVER_VERSION() | SERVER_STATUS() | NOW() | TODAY() | TIMEZONE()} + +SELECT [DISTINCT] select_list + from_clause + [WHERE condition] + [PARTITION BY tag_list] + [window_clause] + [group_by_clause] + [order_by_clasue] [SLIMIT limit_val [SOFFSET offset_val]] [LIMIT limit_val [OFFSET offset_val]] - [>> export_file]; -``` + [>> export_file] -## Wildcard +select_list: + select_expr [, select_expr] ... -Wildcard \* can be used to specify all columns. The result includes only data columns for normal tables. +select_expr: { + * + | query_name.* + | [schema_name.] {table_name | view_name} .* + | t_alias.* + | expr [[AS] c_alias] +} -``` -taos> SELECT * FROM d1001; - ts | current | voltage | phase | -====================================================================================== - 2018-10-03 14:38:05.000 | 10.30000 | 219 | 0.31000 | - 2018-10-03 14:38:15.000 | 12.60000 | 218 | 0.33000 | - 2018-10-03 14:38:16.800 | 12.30000 | 221 | 0.31000 | -Query OK, 3 row(s) in set (0.001165s) -``` +from_clause: { + table_reference [, table_reference] ... + | join_clause [, join_clause] ... +} + +table_reference: + table_expr t_alias + +table_expr: { + table_name + | view_name + | ( subquery ) +} -The result includes both data columns and tag columns for super table. 
+join_clause: + table_reference [INNER] JOIN table_reference ON condition +window_clause: { + SESSION(ts_col, tol_val) + | STATE_WINDOW(col) + | INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)] + +changes_option: { + DURATION duration_val + | ROWS rows_val +} + +group_by_clause: + GROUP BY expr [, expr] ... HAVING condition + +order_by_clasue: + ORDER BY order_expr [, order_expr] ... + +order_expr: + {expr | position | c_alias} [DESC | ASC] [NULLS FIRST | NULLS LAST] ``` -taos> SELECT * FROM meters; - ts | current | voltage | phase | location | groupid | -===================================================================================================================================== - 2018-10-03 14:38:05.500 | 11.80000 | 221 | 0.28000 | California.LoSangeles | 2 | - 2018-10-03 14:38:16.600 | 13.40000 | 223 | 0.29000 | California.LoSangeles | 2 | - 2018-10-03 14:38:05.000 | 10.80000 | 223 | 0.29000 | California.LoSangeles | 3 | - 2018-10-03 14:38:06.500 | 11.50000 | 221 | 0.35000 | California.LoSangeles | 3 | - 2018-10-03 14:38:04.000 | 10.20000 | 220 | 0.23000 | California.SanFrancisco | 3 | - 2018-10-03 14:38:16.650 | 10.30000 | 218 | 0.25000 | California.SanFrancisco | 3 | - 2018-10-03 14:38:05.000 | 10.30000 | 219 | 0.31000 | California.SanFrancisco | 2 | - 2018-10-03 14:38:15.000 | 12.60000 | 218 | 0.33000 | California.SanFrancisco | 2 | - 2018-10-03 14:38:16.800 | 12.30000 | 221 | 0.31000 | California.SanFrancisco | 2 | -Query OK, 9 row(s) in set (0.002022s) + +## Lists + +A query can be performed on some or all columns. Data and tag columns can all be included in the SELECT list. + +## Wildcards + +You can use an asterisk (\*) as a wildcard character to indicate all columns. For standard tables, the asterisk indicates only data columns. For supertables and subtables, tag columns are also included. + +```sql +SELECT * FROM d1001; ``` -Wildcard can be used with table name as prefix. 
Both SQL statements below have the same effect and return all columns. +You can use a table name as a prefix before an asterisk. For example, the following SQL statements both return all columns from the d1001 table: -```SQL +```sql SELECT * FROM d1001; SELECT d1001.* FROM d1001; ``` -In a JOIN query, however, the results are different with or without a table name prefix. \* without table prefix will return all the columns of both tables, but \* with table name as prefix will return only the columns of that table. +However, in a JOIN query, using a table name prefix with an asterisk returns different results. In this case, querying * returns all data in all columns in all tables (not including tags), whereas using a table name prefix returns all data in all columns in the specified table only. -``` -taos> SELECT * FROM d1001, d1003 WHERE d1001.ts=d1003.ts; - ts | current | voltage | phase | ts | current | voltage | phase | -================================================================================================================================== - 2018-10-03 14:38:05.000 | 10.30000| 219 | 0.31000 | 2018-10-03 14:38:05.000 | 10.80000| 223 | 0.29000 | -Query OK, 1 row(s) in set (0.017385s) +```sql +SELECT * FROM d1001, d1003 WHERE d1001.ts=d1003.ts; +SELECT d1001.* FROM d1001,d1003 WHERE d1001.ts = d1003.ts; ``` -``` -taos> SELECT d1001.* FROM d1001,d1003 WHERE d1001.ts = d1003.ts; - ts | current | voltage | phase | -====================================================================================== - 2018-10-03 14:38:05.000 | 10.30000 | 219 | 0.31000 | -Query OK, 1 row(s) in set (0.020443s) -``` +The first of the preceding SQL statements returns all columns from the d1001 and d1003 tables, but the second of the preceding SQL statements returns all columns from the d1001 table only. -Wildcard \* can be used with some functions, but the result may be different depending on the function being used. For example, `count(*)` returns only one column, i.e. 
the number of rows; `first`, `last` and `last_row` return all columns of the selected row. +With regard to the other SQL functions that support wildcards, the differences are as follows: +`count(*)` only returns one column. `first`, `last`, and `last_row` return all columns. -``` -taos> SELECT COUNT(*) FROM d1001; - count(*) | -======================== - 3 | -Query OK, 1 row(s) in set (0.001035s) -``` +### Tag Columns -``` -taos> SELECT FIRST(*) FROM d1001; - first(ts) | first(current) | first(voltage) | first(phase) | -========================================================================================= - 2018-10-03 14:38:05.000 | 10.30000 | 219 | 0.31000 | -Query OK, 1 row(s) in set (0.000849s) +You can query tag columns in supertables and subtables and receive results in the same way as querying data columns. + +```sql +SELECT location, groupid, current FROM d1001 LIMIT 2; ``` -## Tags +### Distinct Values -Starting from version 2.0.14, tag columns can be selected together with data columns when querying sub tables. Please note however, that, wildcard \* cannot be used to represent any tag column. This means that tag columns must be specified explicitly like the example below. +The DISTINCT keyword returns only values that are different over one or more columns. You can use the DISTINCT keyword with tag columns and data columns. -``` -taos> SELECT location, groupid, current FROM d1001 LIMIT 2; - location | groupid | current | -====================================================================== - California.SanFrancisco | 2 | 10.30000 | - California.SanFrancisco | 2 | 12.60000 | -Query OK, 2 row(s) in set (0.003112s) -``` +The following SQL statement returns distinct values from a tag column: -## Get distinct values +```sql +SELECT DISTINCT tag_name [, tag_name ...] FROM stb_name; +``` -`DISTINCT` keyword can be used to get all the unique values of tag columns from a super table. 
It can also be used to get all the unique values of data columns from a table or subtable. +The following SQL statement returns distinct values from a data column: ```sql -SELECT DISTINCT tag_name [, tag_name ...] FROM stb_name; SELECT DISTINCT col_name [, col_name ...] FROM tb_name; ``` @@ -124,231 +130,188 @@ SELECT DISTINCT col_name [, col_name ...] FROM tb_name; ::: -## Columns Names of Result Set +### Column Names -When using `SELECT`, the column names in the result set will be the same as that in the select clause if `AS` is not used. `AS` can be used to rename the column names in the result set. For example +When using `SELECT`, the column names in the result set will be the same as that in the select clause if `AS` is not used. `AS` can be used to rename the column names in the result set. For example: -``` +```sql taos> SELECT ts, ts AS primary_key_ts FROM d1001; - ts | primary_key_ts | -==================================================== - 2018-10-03 14:38:05.000 | 2018-10-03 14:38:05.000 | - 2018-10-03 14:38:15.000 | 2018-10-03 14:38:15.000 | - 2018-10-03 14:38:16.800 | 2018-10-03 14:38:16.800 | -Query OK, 3 row(s) in set (0.001191s) ``` `AS` can't be used together with `first(*)`, `last(*)`, or `last_row(*)`. -## Implicit Columns +### Pseudocolumns + +**TBNAME** +The TBNAME pseudocolumn in a supertable contains the names of subtables within the supertable. -`Select_exprs` can be column names of a table, or function expression or arithmetic expression on columns. The maximum number of allowed column names and expressions is 256. Timestamp and the corresponding tag names will be returned in the result set if `interval` or `group by tags` are used, and timestamp will always be the first column in the result set. 
+The following SQL statement returns all unique subtable names and locations within the meters supertable:
 
-## Table List
+```mysql
+SELECT DISTINCT TBNAME, location FROM meters;
+```
 
-`FROM` can be followed by a number of tables or super tables, or can be followed by a sub-query. If no database is specified as current database in use, table names must be preceded with database name, like `power.d1001`.
+Use the `INS_TAGS` system table in `INFORMATION_SCHEMA` to query the information for subtables in a supertable. For example, the following statement returns the name and tag values for each subtable in the `meters` supertable.
 
-```SQL
-SELECT * FROM power.d1001;
+```mysql
+SELECT table_name, tag_name, tag_type, tag_value FROM information_schema.ins_tags WHERE stable_name='meters';
 ```
 
-has same effect as
+The following SQL statement returns the number of subtables within the meters supertable.
 
-```SQL
-USE power;
-SELECT * FROM d1001;
+```mysql
+SELECT COUNT(*) FROM (SELECT DISTINCT TBNAME FROM meters);
 ```
 
-## Special Query
+In the preceding two statements, only tags can be used as filtering conditions in the WHERE clause.
 
-Some special query functions can be invoked without `FROM` sub-clause. For example, the statement below can be used to get the current database in use.
+**\_QSTART and \_QEND**
 
-```
-taos> SELECT DATABASE();
- database() |
-=================================
- power |
-Query OK, 1 row(s) in set (0.000079s)
-```
+The \_QSTART and \_QEND pseudocolumns contain the beginning and end of the time range of a query. If the WHERE clause in a statement does not contain valid timestamps, the time range is equal to [-2^63, 2^63 - 1].
 
-If no database is specified upon logging in and no database is specified with `USE` after login, NULL will be returned by `select database()`.
+The \_QSTART and \_QEND pseudocolumns cannot be used in a WHERE clause. 
-```
-taos> SELECT DATABASE();
- database() |
-=================================
- NULL |
-Query OK, 1 row(s) in set (0.000184s)
-```
+**\_WSTART, \_WEND, and \_WDURATION**
+
+The \_WSTART, \_WEND, and \_WDURATION pseudocolumns indicate the beginning, end, and duration of a window.
 
-The statement below can be used to get the version of client or server.
+These pseudocolumns can be used only in time window-based aggregations and must occur after the aggregation clause.
 
-```
-taos> SELECT CLIENT_VERSION();
- client_version() |
-===================
- 2.0.0.0 |
-Query OK, 1 row(s) in set (0.000070s)
+**\_c0 and \_ROWTS**
 
-taos> SELECT SERVER_VERSION();
- server_version() |
-===================
- 2.0.0.0 |
-Query OK, 1 row(s) in set (0.000077s)
+In TDengine, the first column of all tables must be a timestamp. This column is the primary key of the table. The \_c0 and \_ROWTS pseudocolumns both represent the values of this column. These pseudocolumns enable greater flexibility and standardization. For example, you can use functions such as MAX and MIN with these pseudocolumns.
+
+```sql
+select _rowts, max(current) from meters;
 ```
 
-The statement below is used to check the server status. An integer, like `1`, is returned if the server status is OK, otherwise an error code is returned. This is compatible with the status check for TDengine from connection pool or 3rd party tools, and can avoid the problem of losing the connection from a connection pool when using the wrong heartbeat checking SQL statement.
+## Query Objects
 
-```
-taos> SELECT SERVER_STATUS();
- server_status() |
-==================
- 1 |
-Query OK, 1 row(s) in set (0.000074s)
+`FROM` can be followed by a number of tables or super tables, or can be followed by a sub-query.
+If no database is specified as current database in use, table names must be preceded with database name, for example, `power.d1001`. 
-taos> SELECT SERVER_STATUS() AS status; - status | -============== - 1 | -Query OK, 1 row(s) in set (0.000081s) -``` +You can perform INNER JOIN statements based on the primary key. The following conditions apply: -## \_block_dist +1. You can use FROM table list or an explicit JOIN clause. +2. For standard tables and subtables, you must specify an ON condition and the condition must be equivalent to the primary key. +3. For supertables, the ON condition must be equivalent to the primary key. In addition, the tag columns of the tables on which the INNER JOIN is performed must have a one-to-one relationship. You cannot specify an OR condition. +4. The tables that are included in a JOIN clause must be of the same type (supertable, standard table, or subtable). +5. You can include subqueries before and after the JOIN keyword. +6. You cannot include more than ten tables in a JOIN clause. +7. You cannot include a FILL clause and a JOIN clause in the same statement. -**Description**: Get the data block distribution of a table or STable. +## GROUP BY -```SQL title="Syntax" -SELECT _block_dist() FROM { tb_name | stb_name } -``` +If you use a GROUP BY clause, the SELECT list can only include the following items: -**Restrictions**:No argument is allowed, where clause is not allowed +1. Constants +2. Aggregate functions +3. Expressions that are consistent with the expression following the GROUP BY clause +4. Expressions that include the preceding expression -**Sub Query**:Sub query or nested query are not supported +The GROUP BY clause groups each row of data by the value of the expression following the clause and returns a combined result for each group. -**Return value**: A string which includes the data block distribution of the specified table or STable, i.e. the histogram of rows stored in the data blocks of the table or STable. +The expressions in a GROUP BY clause can include any column in any table or view. 
It is not necessary that the expressions appear in the SELECT list. -```text title="Result" -summary: -5th=[392], 10th=[392], 20th=[392], 30th=[392], 40th=[792], 50th=[792] 60th=[792], 70th=[792], 80th=[792], 90th=[792], 95th=[792], 99th=[792] Min=[392(Rows)] Max=[800(Rows)] Avg=[666(Rows)] Stddev=[2.17] Rows=[2000], Blocks=[3], Size=[5.440(Kb)] Comp=[0.23] RowsInMem=[0] SeekHeaderTime=[1(us)] -``` +The GROUP BY clause does not guarantee that the results are ordered. If you want to ensure that grouped data is ordered, use the ORDER BY clause. -**More explanation about above example**: -- Histogram about the rows stored in the data blocks of the table or STable: the value of rows for 5%, 10%, 20%, 30%, 40%, 50%, 60%, 70%, 80%, 90%, 95%, and 99% -- Minimum number of rows stored in a data block, i.e. Min=[392(Rows)] -- Maximum number of rows stored in a data block, i.e. Max=[800(Rows)] -- Average number of rows stored in a data block, i.e. Avg=[666(Rows)] -- stddev of number of rows, i.e. Stddev=[2.17] -- Total number of rows, i.e. Rows[2000] -- Total number of data blocks, i.e. Blocks=[3] -- Total disk size consumed, i.e. Size=[5.440(Kb)] -- Compression ratio, which means the compressed size divided by original size, i.e. Comp=[0.23] -- Total number of rows in memory, i.e. RowsInMem=[0], which means no rows in memory -- The time spent on reading head file (to retrieve data block information), i.e. SeekHeaderTime=[1(us)], which means 1 microsecond. +## PARTITION BY -## Special Keywords in TAOS SQL +The PARTITION BY clause is a TDengine-specific extension to standard SQL. This clause partitions data based on the part_list and performs computations per partition. -- `TBNAME`: it is treated as a special tag when selecting on a super table, representing the name of subtables in that super table. -- `_c0`: represents the first column of a table or super table. +For more information, see TDengine Extensions. 
-## Tips
+## ORDER BY
 
-To get all the subtables and corresponding tag values from a super table:
+The ORDER BY keyword orders query results. If you do not include an ORDER BY clause in a query, the order of the results can be inconsistent.
 
-```SQL
-SELECT TBNAME, location FROM meters;
-```
+You can specify integers after ORDER BY to indicate the order in which you want the items in the SELECT list to be displayed. For example, 1 indicates the first item in the select list.
+
+You can specify ASC for ascending order or DESC for descending order.
+
+You can also use the NULLS keyword to specify the position of null values. Ascending order uses NULLS LAST by default. Descending order uses NULLS FIRST by default.
+
+## LIMIT
+
+The LIMIT keyword controls the number of results that are displayed. You can also use the OFFSET keyword to specify the result to display first. `LIMIT` and `OFFSET` are executed after `ORDER BY` in the query execution. You can include an offset in a LIMIT clause. For example, LIMIT 5 OFFSET 2 can also be written LIMIT 2, 5. Both of these clauses display the third through the seventh results.
+
+In a statement that includes a PARTITION BY clause, the LIMIT keyword is performed on each partition, not on the entire set of results.
+
+## SLIMIT
+
+The SLIMIT keyword is used with a PARTITION BY clause to control the number of partitions that are displayed. You can include an offset in a SLIMIT clause. For example, SLIMIT 5 OFFSET 2 can also be written SLIMIT 2, 5. Both of these clauses display the third through the seventh partitions.
+
+Note: If you include an ORDER BY clause, only one partition can be displayed.
+
+## Special Query
 
-To get the number of sub tables in a super table:
+Some special query functions can be invoked without `FROM` sub-clause.
 
-```SQL
-SELECT COUNT(TBNAME) FROM meters;
+### Obtain Current Database
+
+The following SQL statement returns the current database. 
If a database has not been specified on login or with the `USE` command, a null value is returned. + +```sql +SELECT DATABASE(); ``` -Only filter on `TAGS` are allowed in the `where` clause for above two query statements. For example: +### Obtain Current Version +```sql +SELECT CLIENT_VERSION(); +SELECT SERVER_VERSION(); ``` -taos> SELECT TBNAME, location FROM meters; - tbname | location | -================================================================== - d1004 | California.LosAngeles | - d1003 | California.LosAngeles | - d1002 | California.SanFrancisco | - d1001 | California.SanFrancisco | -Query OK, 4 row(s) in set (0.000881s) -taos> SELECT COUNT(tbname) FROM meters WHERE groupId > 2; - count(tbname) | -======================== - 2 | -Query OK, 1 row(s) in set (0.001091s) +## Obtain Server Status + +The following SQL statement returns the status of the TDengine server. An integer indicates that the server is running normally. An error code indicates that an error has occurred. This statement can also detect whether a connection pool or third-party tool is connected to TDengine properly. By using this statement, you can ensure that connections in a pool are not lost due to an incorrect heartbeat detection statement. + +```sql +SELECT SERVER_STATUS(); ``` -- Wildcard \* can be used to get all columns, or specific column names can be specified. Arithmetic operation can be performed on columns of numerical types, columns can be renamed in the result set. -- Arithmetic operation on columns can't be used in where clause. For example, `where a*2>6;` is not allowed but `where a>6/2;` can be used instead for the same purpose. -- Arithmetic operation on columns can't be used as the objectives of select statement. For example, `select min(2*a) from t;` is not allowed but `select 2*min(a) from t;` can be used instead. -- Logical operation can be used in `WHERE` clause to filter numeric values, wildcard can be used to filter string values. 
-- Result sets are arranged in ascending order of the first column, i.e. timestamp, but it can be controlled to output as descending order of timestamp. If `order by` is used on other columns, the result may not be as expected. By the way, \_c0 is used to represent the first column, i.e. timestamp. -- `LIMIT` parameter is used to control the number of rows to output. `OFFSET` parameter is used to specify from which row to output. `LIMIT` and `OFFSET` are executed after `ORDER BY` in the query execution. A simple tip is that `LIMIT 5 OFFSET 2` can be abbreviated as `LIMIT 2, 5`. -- What is controlled by `LIMIT` is the number of rows in each group when `GROUP BY` is used. -- `SLIMIT` parameter is used to control the number of groups when `GROUP BY` is used. Similar to `LIMIT`, `SLIMIT 5 OFFSET 2` can be abbreviated as `SLIMIT 2, 5`. -- ">>" can be used to output the result set of `select` statement to the specified file. +### Obtain Current Time -## Where +```sql +SELECT NOW(); +``` -Logical operations in below table can be used in the `where` clause to filter the resulting rows. 
+### Obtain Current Date -| **Operation** | **Note** | **Applicable Data Types** | -| ------------- | ------------------------ | ----------------------------------------- | -| > | larger than | all types except bool | -| < | smaller than | all types except bool | -| >= | larger than or equal to | all types except bool | -| <= | smaller than or equal to | all types except bool | -| = | equal to | all types | -| <\> | not equal to | all types | -| is [not] null | is null or is not null | all types | -| between and | within a certain range | all types except bool | -| in | match any value in a set | all types except first column `timestamp` | -| like | match a wildcard string | **`binary`** **`nchar`** | -| match/nmatch | filter regex | **`binary`** **`nchar`** | +```sql +SELECT TODAY(); +``` -**Explanations**: +### Obtain Current Time Zone -- Operator `<\>` is equal to `!=`, please note that this operator can't be used on the first column of any table, i.e.timestamp column. -- Operator `like` is used together with wildcards to match strings - - '%' matches 0 or any number of characters, '\_' matches any single ASCII character. - - `\_` is used to match the \_ in the string. - - The maximum length of wildcard string is 100 bytes from version 2.1.6.1 (before that the maximum length is 20 bytes). `maxWildCardsLength` in `taos.cfg` can be used to control this threshold. A very long wildcard string may slowdown the execution performance of `LIKE` operator. -- `AND` keyword can be used to filter multiple columns simultaneously. AND/OR operation can be performed on single or multiple columns from version 2.3.0.0. However, before 2.3.0.0 `OR` can't be used on multiple columns. -- For timestamp column, only one condition can be used; for other columns or tags, `OR` keyword can be used to combine multiple logical operators. For example, `((value > 20 AND value < 30) OR (value < 12))`. 
- - From version 2.3.0.0, multiple conditions can be used on timestamp column, but the result set can only contain single time range. -- From version 2.0.17.0, operator `BETWEEN AND` can be used in where clause, for example `WHERE col2 BETWEEN 1.5 AND 3.25` means the filter condition is equal to "1.5 ≤ col2 ≤ 3.25". -- From version 2.1.4.0, operator `IN` can be used in the where clause. For example, `WHERE city IN ('California.SanFrancisco', 'California.SanDiego')`. For bool type, both `{true, false}` and `{0, 1}` are allowed, but integers other than 0 or 1 are not allowed. FLOAT and DOUBLE types are impacted by floating point precision errors. Only values that match the condition within the tolerance will be selected. Non-primary key column of timestamp type can be used with `IN`. -- From version 2.3.0.0, regular expression is supported in the where clause with keyword `match` or `nmatch`. The regular expression is case insensitive. +```sql +SELECT TIMEZONE(); +``` ## Regular Expression ### Syntax -```SQL +```txt WHERE (column|tbname) **match/MATCH/nmatch/NMATCH** _regex_ ``` ### Specification -The regular expression being used must be compliant with POSIX specification, please refer to [Regular Expressions](https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap09.html). +TDengine supports POSIX regular expression syntax. For more information, see [Regular Expressions](https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap09.html). ### Restrictions -Regular expression can be used against only table names, i.e. `tbname`, and tags of binary/nchar types, but can't be used against data columns. +Regular expression filtering is supported only on table names (TBNAME), BINARY tags, and NCHAR tags. Regular expression filtering cannot be performed on data columns. -The maximum length of regular expression string is 128 bytes. Configuration parameter `maxRegexStringLen` can be used to set the maximum allowed regular expression. 
It's a configuration parameter on the client side, and will take effect after restarting the client. +A regular expression string cannot exceed 128 bytes. You can configure this value by modifying the maxRegexStringLen parameter on the TDengine Client. The modified value takes effect when the client is restarted. ## JOIN -From version 2.2.0.0, inner join is fully supported in TDengine. More specifically, the inner join between table and table, between STable and STable, and between sub query and sub query are supported. +TDengine supports natural joins between supertables, between standard tables, and between subqueries. The difference between natural joins and inner joins is that natural joins require that the fields being joined in the supertables or standard tables must have the same name. Data or tag columns must be joined with the equivalent column in another table. -Only primary key, i.e. timestamp, can be used in the join operation between table and table. For example: +For standard tables, only the timestamp (primary key) can be used in join operations. For example: ```sql SELECT * @@ -356,25 +319,26 @@ FROM temp_tb_1 t1, pressure_tb_1 t2 WHERE t1.ts = t2.ts ``` -In the join operation between STable and STable, besides the primary key, i.e. timestamp, tags can also be used. For example: +For supertables, tags as well as timestamps can be used in join operations. For example: ```sql SELECT * -FROM temp_STable t1, temp_STable t2 +FROM temp_stable t1, temp_stable t2 WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0; ``` -Similarly, join operations can be performed on the result set of multiple sub queries. +Similarly, join operations can be performed on the result sets of multiple subqueries. :::note -Restrictions on join operation: -- The number of tables or STables in a single join operation can't exceed 10. -- `FILL` is not allowed in the query statement that includes JOIN operation. 
-Arithmetic operation is not allowed on the result set of join operation.
-`GROUP BY` is not allowed on a part of tables that participate in join operation.
-`OR` can't be used in the conditions for join operation
-join operation can't be performed on data columns, i.e. can only be performed on tags or primary key, i.e. timestamp
+The following restrictions apply to JOIN statements:
+
+- The number of tables or supertables in a single join operation cannot exceed 10.
+- `FILL` cannot be used in a JOIN statement.
+- Arithmetic operations cannot be performed on the result sets of join operation.
+- `GROUP BY` is not allowed on a segment of the tables that participate in a join operation.
+- `OR` cannot be used in the conditions for join operation
+- Join operation can be performed only on tags or timestamps. You cannot perform a join operation on data columns.
 
 :::
 
@@ -384,7 +348,7 @@ Nested query is also called sub query. This means that in a single SQL statement
 
 From 2.2.0.0, unassociated sub query can be used in the `FROM` clause. Unassociated means the sub query doesn't use the parameters in the parent query. More specifically, in the `tb_name_list` of `SELECT` statement, an independent SELECT statement can be used. So a complete nested query looks like:
 
-```SQL
+```
 SELECT ... FROM (SELECT ... FROM ...) ...;
 ```
 
@@ -408,42 +372,42 @@ SELECT ... FROM (SELECT ... FROM ...) ...;
 
 ## UNION ALL
 
-```SQL title=Syntax
+```txt title=Syntax
 SELECT ...
 UNION ALL SELECT ...
 [UNION ALL SELECT ...]
 ```
 
-`UNION ALL` operator can be used to combine the result set from multiple select statements as long as the result set of these select statements have exactly the same columns. `UNION ALL` doesn't remove redundant rows from multiple result sets. In a single SQL statement, at most 100 `UNION ALL` can be supported.
+TDengine supports the `UNION ALL` operation. 
`UNION ALL` operator can be used to combine the result set from multiple select statements as long as the result set of these select statements have exactly the same columns. `UNION ALL` doesn't remove redundant rows from multiple result sets. In a single SQL statement, at most 100 `UNION ALL` can be supported. ### Examples table `tb1` is created using below SQL statement: -```SQL +``` CREATE TABLE tb1 (ts TIMESTAMP, col1 INT, col2 FLOAT, col3 BINARY(50)); ``` The rows in the past one hour in `tb1` can be selected using below SQL statement: -```SQL +``` SELECT * FROM tb1 WHERE ts >= NOW - 1h; ``` The rows between 2018-06-01 08:00:00.000 and 2018-06-02 08:00:00.000 and col3 ends with 'nny' can be selected in the descending order of timestamp using below SQL statement: -```SQL +``` SELECT * FROM tb1 WHERE ts > '2018-06-01 08:00:00.000' AND ts <= '2018-06-02 08:00:00.000' AND col3 LIKE '%nny' ORDER BY ts DESC; ``` The sum of col1 and col2 for rows later than 2018-06-01 08:00:00.000 and whose col2 is bigger than 1.2 can be selected and renamed as "complex", while only 10 rows are output from the 5th row, by below SQL statement: -```SQL +``` SELECT (col1 + col2) AS 'complex' FROM tb1 WHERE ts > '2018-06-01 08:00:00.000' AND col2 > 1.2 LIMIT 10 OFFSET 5; ``` The rows in the past 10 minutes and whose col2 is bigger than 3.14 are selected and output to the result file `/home/testoutput.csv` with below SQL statement: -```SQL +``` SELECT COUNT(*) FROM tb1 WHERE ts >= NOW - 10m AND col2 > 3.14 >> /home/testoutput.csv; ``` diff --git a/docs/en/12-taos-sql/08-delete-data.mdx b/docs/en/12-taos-sql/08-delete-data.mdx index 86443dca53..999c467ad0 100644 --- a/docs/en/12-taos-sql/08-delete-data.mdx +++ b/docs/en/12-taos-sql/08-delete-data.mdx @@ -4,8 +4,7 @@ description: "Delete data from table or Stable" title: Delete Data --- -TDengine provides the functionality of deleting data from a table or STable according to specified time range, it can be used to cleanup abnormal data 
generated due to device failure. Please be noted that this functionality is only available in Enterprise version, please refer to [TDengine Enterprise Edition](https://tdengine.com/products#enterprise-edition-link) - +TDengine provides the functionality of deleting data from a table or STable according to specified time range, it can be used to cleanup abnormal data generated due to device failure. **Syntax:** @@ -16,21 +15,21 @@ DELETE FROM [ db_name. ] tb_name [WHERE condition]; **Description:** Delete data from a table or STable **Parameters:** - + - `db_name`: Optional parameter, specifies the database in which the table exists; if not specified, the current database will be used. - `tb_name`: Mandatory parameter, specifies the table name from which data will be deleted, it can be normal table, subtable or STable. -- `condition`: Optional parameter, specifies the data filter condition. If no condition is specified all data will be deleted, so please be cautions to delete data without any condition. The condition used here is only applicable to the first column, i.e. the timestamp column. If the table is a STable, the condition is also applicable to tag columns. +- `condition`: Optional parameter, specifies the data filter condition. If no condition is specified all data will be deleted, so please be cautions to delete data without any condition. The condition used here is only applicable to the first column, i.e. the timestamp column. **More Explanations:** - -The data can't be recovered once deleted, so please be cautious to use the functionality of deleting data. It's better to firstly make sure the data to be deleted using `select` then execute `delete`. + +The data can't be recovered once deleted, so please be cautious to use the functionality of deleting data. It's better to firstly make sure the data to be deleted using `select` then execute `delete`. **Example:** - -`meters` is a STable, in which `groupid` is a tag column of int type. 
Now we want to delete the data older than 2021-10-01 10:40:00.100 and `groupid` is 1. The SQL for this purpose is like below: + +`meters` is a STable, in which `groupid` is a tag column of int type. Now we want to delete the data older than 2021-10-01 10:40:00.100. You can perform this action by running the following SQL statement: ```sql -delete from meters where ts < '2021-10-01 10:40:00.100' and groupid=1 ; +delete from meters where ts < '2021-10-01 10:40:00.100' ; ``` The output is: diff --git a/docs/en/12-taos-sql/10-function.md b/docs/en/12-taos-sql/10-function.md index 6375422b07..36043e7456 100644 --- a/docs/en/12-taos-sql/10-function.md +++ b/docs/en/12-taos-sql/10-function.md @@ -1,13 +1,14 @@ --- +sidebar_label: Functions title: Functions toc_max_heading_level: 4 --- -## Single-Row Functions +## Single Row Functions -Single-Row functions return a result row for each row in the query result. +Single row functions return a result for each row. -### Numeric Functions +### Mathematical Functions #### ABS @@ -17,16 +18,15 @@ SELECT ABS(field_name) FROM { tb_name | stb_name } [WHERE clause] **Description**: The absolute value of a specific field. -**Return value type**: Same as input type. +**Return value type**: Same as the field being used -**Applicable data types**: Numeric types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable. +**Nested query**: It can be used in both the outer query and inner query in a nested query. -**Applicable nested query**: Inner query and Outer query. +**Applicable table types**: standard tables and supertables -**More explanations**: -- Cannot be used with aggregate functions. +**Usage**: This function can only be used on data columns. It can be used with selection and projection functions but not with aggregation functions. 
#### ACOS @@ -34,18 +34,17 @@ SELECT ABS(field_name) FROM { tb_name | stb_name } [WHERE clause] SELECT ACOS(field_name) FROM { tb_name | stb_name } [WHERE clause] ``` -**Description**: The anti-cosine of a specific field. +**Description**: The arc cosine of a specific field. -**Return value type**: DOUBLE. +**Return value type**: Double -**Applicable data types**: Numeric types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable. +**Nested query**: It can be used in both the outer query and inner query in a nested query. -**Applicable nested query**: Inner query and Outer query. +**Applicable table types**: standard tables and supertables -**More explanations**: -- Cannot be used with aggregate functions. +**Usage**: This function can only be used on data columns. It can be used with selection and projection functions but not with aggregation functions. #### ASIN @@ -53,18 +52,18 @@ SELECT ACOS(field_name) FROM { tb_name | stb_name } [WHERE clause] SELECT ASIN(field_name) FROM { tb_name | stb_name } [WHERE clause] ``` -**Description**: The anti-sine of a specific field. +**Description**: The arc sine of a specific field. -**Return value type**: DOUBLE. +**Return value type**: Double -**Applicable data types**: Numeric types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable +**Nested query**: It can be used in both the outer query and inner query in a nested query. -**Applicable nested query**: Inner query and Outer query. +**Applicable table types**: standard tables and supertables + +**Usage**: This function can only be used on data columns. It can be used with selection and projection functions but not with aggregation functions. -**More explanations**: -- Cannot be used with aggregate functions. 
#### ATAN @@ -72,37 +71,36 @@ SELECT ASIN(field_name) FROM { tb_name | stb_name } [WHERE clause] SELECT ATAN(field_name) FROM { tb_name | stb_name } [WHERE clause] ``` -**Description**: anti-tangent of a specific field. +**Description**: The arc tangent of a specific field. -**Return value type**: DOUBLE. +**Return value type**: Double -**Applicable data types**: Numeric types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable +**Nested query**: It can be used in both the outer query and inner query in a nested query. -**Applicable nested query**: Inner query and Outer query. +**Applicable table types**: standard tables and supertables + +**Usage**: This function can only be used on data columns. It can be used with selection and projection functions but not with aggregation functions. -**More explanations**: -- Cannot be used with aggregate functions. #### CEIL -``` +```sql SELECT CEIL(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**: The rounded up value of a specific field. +**Description**: The rounded up value of a specific field -**Return value type**: Same as input type. +**Return value type**: Same as the field being used -**Applicable data types**: Numeric types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable +**Applicable table types**: standard tables and supertables -**Applicable nested query**: Inner query and outer query. +**Nested query**: It can be used in both the outer query and inner query in a nested query. -**More explanations**: -- Can't be used with aggregate functions. +**Usage**: This function can only be used on data columns. It can be used with selection and projection functions but not with aggregation functions. #### COS @@ -112,45 +110,43 @@ SELECT COS(field_name) FROM { tb_name | stb_name } [WHERE clause] **Description**: The cosine of a specific field. -**Return value type**: DOUBLE. 
+**Return value type**: Double -**Applicable data types**: Numeric types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable. +**Nested query**: It can be used in both the outer query and inner query in a nested query. -**Applicable nested query**: Inner query and Outer query. +**Applicable table types**: standard tables and supertables -**More explanations**: -- Can't be used with aggregate functions. +**Usage**: This function can only be used on data columns. It can be used with selection and projection functions but not with aggregation functions. #### FLOOR -``` +```sql SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**: The rounded down value of a specific field. - -**More explanations**: Refer to `CEIL` function for usage restrictions. +**Description**: The rounded down value of a specific field + **More explanations**: The restrictions are same as those of the `CEIL` function. #### LOG ```sql -SELECT LOG(field_name, base) FROM { tb_name | stb_name } [WHERE clause] +SELECT LOG(field_name[, base]) FROM { tb_name | stb_name } [WHERE clause] ``` -**Description**: The logarithm of a specific field with `base` as the radix. If `base` parameter is ignored, natural logarithm of the field is returned. +**Description**: The logarithm of a specific field with `base` as the radix. If you do not enter a base, the natural logarithm of the field is returned. + +**Return value type**: Double -**Return value type**: DOUBLE. +**Applicable data types**: Numeric -**Applicable data types**: Numeric types. +**Nested query**: It can be used in both the outer query and inner query in a nested query. -**Applicable table types**: table, STable. +**Applicable table types**: standard tables and supertables -**Applicable nested query**: Inner query and Outer query. +**Usage**: This function can only be used on data columns. It can be used with selection and projection functions but not with aggregation functions. 
-**More explanations**: -- Can't be used with aggregate functions #### POW @@ -158,28 +154,28 @@ SELECT LOG(field_name, base) FROM { tb_name | stb_name } [WHERE clause] SELECT POW(field_name, power) FROM { tb_name | stb_name } [WHERE clause] ``` -**Description**: The power of a specific field with `power` as the index. +**Description**: The power of a specific field with `power` as the exponent. + +**Return value type**: Double -**Return value type**: DOUBLE. +**Applicable data types**: Numeric -**Applicable data types**: Numeric types. +**Nested query**: It can be used in both the outer query and inner query in a nested query. -**Applicable table types**: table, STable. +**Applicable table types**: standard tables and supertables -**Applicable nested query**: Inner query and Outer query. +**Usage**: This function can only be used on data columns. It can be used with selection and projection functions but not with aggregation functions. -**More explanations**: -- Can't be used with aggregate functions. #### ROUND -``` +```sql SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**: The rounded value of a specific field. +**Description**: The rounded value of a specific field. + **More explanations**: The restrictions are same as those of the `CEIL` function. -**More explanations**: Refer to `CEIL` function for usage restrictions. #### SIN @@ -189,18 +185,15 @@ SELECT SIN(field_name) FROM { tb_name | stb_name } [WHERE clause] **Description**: The sine of a specific field. -**Description**: The anti-cosine of a specific field. +**Return value type**: Double -**Return value type**: DOUBLE. +**Applicable data types**: Numeric -**Applicable data types**: Numeric types. - -**Applicable table types**: table, STable. +**Nested query**: It can be used in both the outer query and inner query in a nested query. -**Applicable nested query**: Inner query and Outer query. 
+**Applicable table types**: standard tables and supertables -**More explanations**: -- Can't be used with aggregate functions. +**Usage**: This function can only be used on data columns. It can be used with selection and projection functions but not with aggregation functions. #### SQRT @@ -210,16 +203,15 @@ SELECT SQRT(field_name) FROM { tb_name | stb_name } [WHERE clause] **Description**: The square root of a specific field. -**Return value type**: DOUBLE. +**Return value type**: Double -**Applicable data types**: Numeric types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable. +**Nested query**: It can be used in both the outer query and inner query in a nested query. -**Applicable nested query**: Inner query and Outer query. +**Applicable table types**: standard tables and supertables -**More explanations**: -- Can't be used with aggregate functions. +**Usage**: This function can only be used on data columns. It can be used with selection and projection functions but not with aggregation functions. #### TAN @@ -229,39 +221,35 @@ SELECT TAN(field_name) FROM { tb_name | stb_name } [WHERE clause] **Description**: The tangent of a specific field. -**Description**: The anti-cosine of a specific field. - -**Return value type**: DOUBLE. +**Return value type**: Double -**Applicable data types**: Numeric types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable. +**Nested query**: It can be used in both the outer query and inner query in a nested query. -**Applicable nested query**: Inner query and Outer query. +**Applicable table types**: standard tables and supertables -**More explanations**: -- Can't be used with aggregate functions. +**Usage**: This function can only be used on data columns. It can be used with selection and projection functions but not with aggregation functions. -### String Functions +### Concatenation Functions -String functiosn take strings as input and output numbers or strings. 
+Concatenation functions take strings as input and produce string or numeric values as output. #### CHAR_LENGTH -``` +```sql SELECT CHAR_LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause] ``` -**Description**: The mumber of characters of a string. - -**Return value type**: INTEGER. +**Description**: The length in number of characters of a string -**Applicable data types**: VARCHAR, NCHAR. +**Return value type**: Bigint -**Applicable table types**: table, STable. +**Applicable data types**: VARCHAR and NCHAR -**Applicable nested query**: Inner query and Outer query. +**Nested query**: It can be used in both the outer query and inner query in a nested query. +**Applicable table types**: standard tables and supertables #### CONCAT @@ -269,144 +257,139 @@ SELECT CHAR_LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause] SELECT CONCAT(str1|column1, str2|column2, ...) FROM { tb_name | stb_name } [WHERE clause] ``` -**Description**: The concatenation result of two or more strings. +**Description**: The concatenation result of two or more strings -**Return value type**: If all input strings are VARCHAR type, the result is VARCHAR type too. If any one of input strings is NCHAR type, then the result is NCHAR. If input strings contain NULL value, the result is NULL. +**Return value type**: If the concatenated strings are VARCHARs, the result is a VARCHAR. If the concatenated strings are NCHARs, the result is an NCHAR. If an input value is null, the result is null. -**Applicable data types**: VARCHAR, NCHAR. At least 2 input strings are required, and at most 8 input strings are allowed. +**Applicable data types**: VARCHAR and NCHAR You can concatenate between 2 and 8 strings. -**Applicable table types**: table, STable. +**Nested query**: It can be used in both the outer query and inner query in a nested query. -**Applicable nested query**: Inner query and Outer query. 
+**Applicable table types**: standard tables and supertables #### CONCAT_WS -``` +```sql SELECT CONCAT_WS(separator, str1|column1, str2|column2, ...) FROM { tb_name | stb_name } [WHERE clause] ``` -**Description**: The concatenation result of two or more strings with separator. +**Description**: The concatenation result of two or more strings with separator -**Return value type**: If all input strings are VARCHAR type, the result is VARCHAR type too. If any one of input strings is NCHAR type, then the result is NCHAR. If input strings contain NULL value, the result is NULL. +**Return value type**: If the concatenated strings are VARCHARs, the result is a VARCHAR. If the concatenated strings are NCHARs, the result is an NCHAR. If an input value is null, the result is null. -**Applicable data types**: VARCHAR, NCHAR. At least 3 input strings are required, and at most 9 input strings are allowed. +**Applicable data types**: VARCHAR and NCHAR You can concatenate between 3 and 9 strings. -**Applicable table types**: table, STable. +**Nested query**: It can be used in both the outer query and inner query in a nested query. -**Applicable nested query**: Inner query and Outer query. +**Applicable table types**: standard tables and supertables #### LENGTH -``` +```sql SELECT LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause] ``` -**Description**: The length in bytes of a string. +**Description**: The length in bytes of a string -**Return value type**: INTEGER. +**Return value type**: Bigint -**Applicable data types**: VARCHAR, NCHAR. +**Applicable data types**: VARCHAR and NCHAR fields or columns -**Applicable table types**: table, STable. +**Nested query**: It can be used in both the outer query and inner query in a nested query. -**Applicable nested query**: Inner query and Outer query. 
+**Applicable table types**: standard tables and supertables

#### LOWER

-```
+```sql
 SELECT LOWER(str|column) FROM { tb_name | stb_name } [WHERE clause]
 ```

-**Description**: Convert the input string to lower case.
+**Description**: Convert the input string to lower case

-**Return value type**: Same as input type.
+**Return value type**: Same as input

-**Applicable data types**: VARCHAR, NCHAR.
+**Applicable data types**: VARCHAR and NCHAR

-**Applicable table types**: table, STable.
+**Nested query**: It can be used in both the outer query and inner query in a nested query.

-**Applicable nested query**: Inner query and Outer query.
+**Applicable table types**: standard tables and supertables

#### LTRIM

-```
+```sql
 SELECT LTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause]
 ```

-**Description**: Remove the left leading blanks of a string.
+**Description**: Remove the left leading blanks of a string

-**Return value type**: Same as input type.
+**Return value type**: Same as input

-**Applicable data types**: VARCHAR, NCHAR.
+**Applicable data types**: VARCHAR and NCHAR

-**Applicable table types**: table, STable.
+**Nested query**: It can be used in both the outer query and inner query in a nested query.

-**Applicable nested query**: Inner query and Outer query.
+**Applicable table types**: standard tables and supertables

#### RTRIM

-```
-SELECT RTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause]
+```sql
+SELECT RTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause]
 ```

-**Description**: Remove the right tailing blanks of a string.
+**Description**: Remove the right tailing blanks of a string

-**Return value type**: Same as input type.
+**Return value type**: Same as input

-**Applicable data types**: VARCHAR, NCHAR.
+**Applicable data types**: VARCHAR and NCHAR

-**Applicable table types**: table, STable.
+**Nested query**: It can be used in both the outer query and inner query in a nested query. 
-**Applicable nested query**: Inner query and Outer query. +**Applicable table types**: standard tables and supertables #### SUBSTR -``` +```sql SELECT SUBSTR(str,pos[,len]) FROM { tb_name | stb_name } [WHERE clause] ``` -**Description**: The sub-string starting from `pos` with length of `len` from the original string `str`. - -**Return value type**: Same as input type. +**Description**: The sub-string starting from `pos` with length of `len` from the original string `str` - If `len` is not specified, it means from `pos` to the end. -**Applicable data types**: VARCHAR, NCHAR. +**Return value type**: Same as input -**Applicable table types**: table, STable. +**Applicable data types**: VARCHAR and NCHAR Parameter `pos` can be an positive or negative integer; If it's positive, the starting position will be counted from the beginning of the string; if it's negative, the starting position will be counted from the end of the string. -**Applicable nested query**: Inner query and Outer query. +**Nested query**: It can be used in both the outer query and inner query in a nested query. -**More explanations**: +**Applicable table types**: table, STable -- If the input is NULL, the output is NULL -- Parameter `pos` can be an positive or negative integer; If it's positive, the starting position will be counted from the beginning of the string; if it's negative, the starting position will be counted from the end of the string. -- If `len` is not specified, it means from `pos` to the end of string. #### UPPER -``` +```sql SELECT UPPER(str|column) FROM { tb_name | stb_name } [WHERE clause] ``` -**Description**: Convert the input string to upper case. +**Description**: Convert the input string to upper case -**Return value type**: Same as input type. +**Return value type**: Same as input -**Applicable data types**: VARCHAR, NCHAR. +**Applicable data types**: VARCHAR and NCHAR -**Applicable table types**: table, STable. 
+**Nested query**: It can be used in both the outer query and inner query in a nested query. -**Applicable nested query**: Inner query and Outer query. +**Applicable table types**: table, STable ### Conversion Functions -Conversion functions convert from one data type to another. +Conversion functions change the data type of a value. #### CAST @@ -414,19 +397,23 @@ Conversion functions convert from one data type to another. SELECT CAST(expression AS type_name) FROM { tb_name | stb_name } [WHERE clause] ``` -**Description**: Used for type casting. Convert `expression` to the type specified by `type_name`. +**Description**: Convert the input data `expression` into the type specified by `type_name`. This function can be used only in SELECT statements. -**Return value type**: The type specified by parameter `type_name`. +**Return value type**: The type specified by parameter `type_name` + +**Applicable data types**: All data types except JSON + +**Nested query**: It can be used in both the outer query and inner query in a nested query. -**Applicable data types**: `expression` can be any data type except for JSON. +**Applicable table types**: standard tables and supertables **More explanations**: -- Error will be reported for unsupported type casting. +- Error will be reported for unsupported type casting - Some values of some supported data types may not be casted, below are known issues: - 1)When casting VARCHAR/NCHAR to BIGINT/BIGINT UNSIGNED, some characters may be treated as illegal, for example "a" may be converted to 0. - 2)When casting to numeric type, if converted result is out of range the destination data type can hold, overflow may occur and casting behavior is undefined. - 3) When casting to VARCHAR/NCHAR type, if converted string length exceeds the length specified in `type_name`, the result will be truncated. (e.g. CAST("abcd" as BINARY(2)) will return string "ab"). + 1. Some strings cannot be converted to numeric values. 
For example, the string `a` may be converted to `0`. However, this does not produce an error. + 2. If a converted numeric value is larger than the maximum size for the specified type, an overflow will occur. However, this does not produce an error. + 3. If a converted string value is larger than the maximum size for the specified type, the output value will be truncated. However, this does not produce an error. #### TO_ISO8601 @@ -434,18 +421,22 @@ SELECT CAST(expression AS type_name) FROM { tb_name | stb_name } [WHERE clause] SELECT TO_ISO8601(ts[, timezone]) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**: The ISO8601 date/time format converted from a UNIX timestamp, with timezone attached. `timezone` parameter allows attaching any customized timezone string to the output format. If `timezone` parameter is not specified, the timezone information of client side system will be attached. +**Description**: The ISO8601 date/time format converted from a UNIX timestamp, plus the timezone. You can specify any time zone with the timezone parameter. If you do not enter this parameter, the time zone on the client is used. -**Return value type**: VARCHAR. +**Return value type**: VARCHAR -**Applicable data types**: INTEGER, TIMESTAMP. +**Applicable data types**: Integers and timestamps -**Applicable table types**: table, STable. +**Nested query**: It can be used in both the outer query and inner query in a nested query. + +**Applicable table types**: standard tables and supertables **More explanations**: -- If the input is INTEGER represents UNIX timestamp, the precision of the returned value is determined by the digits of the input integer. -- If the input is of TIMESTAMP type, The precision of the returned value is same as the precision set for the current database in use. +- You can specify a time zone in the following format: [z/Z, +/-hhmm, +/-hh, +/-hh:mm]。 For example, TO_ISO8601(1, "+00:00"). 
+- If the input is a UNIX timestamp, the precision of the returned value is determined by the digits of the input timestamp +- If the input is a column of TIMESTAMP type, the precision of the returned value is same as the precision set for the current data base in use + #### TO_JSON @@ -453,38 +444,44 @@ SELECT TO_ISO8601(ts[, timezone]) FROM { tb_name | stb_name } [WHERE clause]; SELECT TO_JSON(str_literal) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**: Convert a JSON string to a JSON body. +**Description**: Converts a string into JSON. -**Return value type**: JSON. +**Return value type**: JSON -**Applicable data types**: JSON string, in the format like '{ "literal" : literal }'. '{}' is NULL value. keys in the string must be string constants, values can be constants of numeric types, bool, string or NULL. Escaping characters are not allowed in the JSON string. +**Applicable data types**: JSON strings in the form `{"literal": literal}`. `{}` indicates a null value. The key must be a string literal. The value can be a numeric literal, string literal, Boolean literal, or null literal. str_literal cannot include escape characters. -**Applicable table types**: table, STable. +**Nested query**: It can be used in both the outer query and inner query in a nested query. + +**Applicable table types**: table, STable -**Applicable nested query**: Inner query and Outer query. #### TO_UNIXTIMESTAMP ```sql -SELECT TO_UNIXTIMESTAMP(datetime_string | ts_col) FROM { tb_name | stb_name } [WHERE clause]; +SELECT TO_UNIXTIMESTAMP(datetime_string) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**: UNIX timestamp converted from a string of date/time format. +**Description**: UNIX timestamp converted from a string of date/time format -**Return value type**: BIGINT. +**Return value type**: BIGINT -**Applicable data types**: VARCHAR, NCHAR. +**Applicable column types**: VARCHAR and NCHAR -**Applicable table types**: table, STable. 
+**Nested query**: It can be used in both the outer query and inner query in a nested query. + +**Applicable table types**: standard tables and supertables **More explanations**: -- The input string must be compatible with ISO8601/RFC3339 standard, NULL will be returned if the string cannot be converted. -- The precision of the returned timestamp is same as the precision set for the current database in use. +- The input string must be compatible with ISO8601/RFC3339 standard, NULL will be returned if the string can't be converted +- The precision of the returned timestamp is same as the precision set for the current data base in use + + +### Time and Date Functions -### DateTime Functions +These functions perform operations on times and dates. -DateTime functions applied to timestamp data. NOW(), TODAY() and TIMEZONE() are executed only once even though they may occur multiple times in a single SQL statement. +All functions that return the current time, such as `NOW`, `TODAY`, and `TIMEZONE`, are calculated only once per statement even if they appear multiple times. #### NOW @@ -494,61 +491,66 @@ SELECT select_expr FROM { tb_name | stb_name } WHERE ts_col cond_operatior NOW() INSERT INTO tb_name VALUES (NOW(), ...); ``` -**Description**: The current time of the client side system. +**Description**: The current time of the client side system -**Return value type**: TIMESTAMP. +**Return value type**: TIMESTAMP -**Applicable data types**: TIMESTAMP only if used in WHERE/INSERT clause. +**Applicable column types**: TIMESTAMP only -**Applicable table types**: table, STable. +**Applicable table types**: standard tables and supertables + +**Nested query**: It can be used in both the outer query and inner query in a nested query. 
**More explanations**: -- Addition and Subtraction operation with time duration can be performed, for example NOW() + 1s, the time unit can be one of the followings: - b(nanosecond), u(microsecond), a(millisecond)), s(second), m(minute), h(hour), d(day), w(week). -- The precision of the returned timestamp is same as the precision set for the current database in use. +- Add and Subtract operation can be performed, for example NOW() + 1s, the time unit can be: + b(nanosecond), u(microsecond), a(millisecond)), s(second), m(minute), h(hour), d(day), w(week) +- The precision of the returned timestamp is same as the precision set for the current data base in use + #### TIMEDIFF ```sql -SELECT TIMEDIFF(ts1 | datetime_string1, ts2 | datetime_string2 [, time_unit]) FROM { tb_name | stb_name } [WHERE clause]; +SELECT TIMEDIFF(ts | datetime_string1, ts | datetime_string2 [, time_unit]) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**: The difference(duration) between two timestamps, and rounded to the time unit specified by `time_unit`. +**Description**: The difference between two timestamps, and rounded to the time unit specified by `time_unit` -**Return value type**: BIGINT. +**Return value type**: BIGINT -**Applicable data types**: INTEGER/TIMESTAMP represents UNIX timestamp, or VARCHAR/NCHAR string in date/time format. +**Applicable column types**: UNIX-style timestamps in BIGINT and TIMESTAMP format and other timestamps in VARCHAR and NCHAR format -**Applicable table types**: table, STable. +**Applicable table types**: standard tables and supertables -**More explanations**: +**Nested query**: It can be used in both the outer query and inner query in a nested query. +**More explanations**: - Time unit specified by `time_unit` can be: - 1b(nanosecond), 1u(microsecond),1a(millisecond),1s(second),1m(minute),1h(hour),1d(day),1w(week). 
-- If `time_unit` parameter is not specified, the precision of the returned time duration is same as the precision set for the current database in use. -- If input date-time string cannot be converted to UNIX timestamp, NULL value is returned. + 1b (nanoseconds), 1u (microseconds), 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), or 1w (weeks) +- The precision of the returned timestamp is same as the precision set for the current data base in use +- If the input data is not formatted as a timestamp, the returned value is null. + #### TIMETRUNCATE ```sql -SELECT TIMETRUNCATE(ts_val | datetime_string | ts_col, time_unit) FROM { tb_name | stb_name } [WHERE clause]; +SELECT TIMETRUNCATE(ts | datetime_string , time_unit) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**: Truncate the input timestamp with unit specified by `time_unit`. +**Description**: Truncate the input timestamp with unit specified by `time_unit` -**Return value type**: TIMESTAMP. +**Return value type**: TIMESTAMP -**Applicable data types**: INTEGER/TIMESTAMP represents UNIX timestamp, or VARCHAR/NCHAR string in date/time format. +**Applicable column types**: UNIX-style timestamps in BIGINT and TIMESTAMP format and other timestamps in VARCHAR and NCHAR format -**Applicable table types**: table, STable. +**Applicable table types**: standard tables and supertables **More explanations**: - - Time unit specified by `time_unit` can be: - 1b(nanosecond),1u(microsecond),1a(millisecond),1s(second),1m(minute),1h(hour),1d(day),1w(week). -- The precision of the returned timestamp is same as the precision set for the current database in use. -- If input date-time string cannot be converted to UNIX timestamp, NULL value is returned. 
+ 1b (nanoseconds), 1u (microseconds), 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), or 1w (weeks) +- The precision of the returned timestamp is same as the precision set for the current data base in use +- If the input data is not formatted as a timestamp, the returned value is null. + #### TIMEZONE @@ -556,13 +558,14 @@ SELECT TIMETRUNCATE(ts_val | datetime_string | ts_col, time_unit) FROM { tb_name SELECT TIMEZONE() FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**: The timezone of the client side system. +**Description**: The timezone of the client side system -**Return value type**: VARCHAR. +**Applicable data types**: VARCHAR -**Applicable data types**: None. +**Applicable column types**: None + +**Applicable table types**: standard tables and supertables -**Applicable table types**: table, STable. #### TODAY @@ -572,269 +575,269 @@ SELECT select_expr FROM { tb_name | stb_name } WHERE ts_col cond_operatior TODAY INSERT INTO tb_name VALUES (TODAY(), ...); ``` -**Description**: The timestamp of 00:00:00 of the client side system. +**Description**: The timestamp of 00:00:00 of the client side system -**Return value type**: TIMESTAMP. +**Return value type**: TIMESTAMP -**Applicable data types**: TIMESTAMP only if used in WHERE/INSERT clause. +**Applicable column types**: TIMESTAMP only -**Applicable table types**: table, STable. +**Applicable table types**: standard tables and supertables **More explanations**: -- Addition and Subtraction operation can be performed with time durations, for example NOW() + 1s, the time unit can be: - b(nanosecond), u(microsecond), a(millisecond)), s(second), m(minute), h(hour), d(day), w(week). -- The precision of the returned timestamp is same as the precision set for the current database in use. 
+- Add and Subtract operation can be performed, for example TODAY() + 1s, the time unit can be: + b(nanosecond), u(microsecond), a(millisecond)), s(second), m(minute), h(hour), d(day), w(week) +- The precision of the returned timestamp is same as the precision set for the current data base in use + ## Aggregate Functions -Aggregate functions return single result row for each group in the query result set. Groups are determined by `GROUP BY` clause or time window clause if they are used; or the whole result is considered a group if neither of them is used. +Aggregate functions return one row per group. You can use windows or GROUP BY to group data. Otherwise, the entire query is considered a single group. + +TDengine supports the following aggregate functions: ### APERCENTILE +```sql +SELECT APERCENTILE(field_name, P[, algo_type]) FROM { tb_name | stb_name } [WHERE clause] ``` -SELECT APERCENTILE(field_name, P[, algo_type]) -FROM { tb_name | stb_name } [WHERE clause] -``` - -**Description**: Similar to `PERCENTILE`, but a approximated result is returned. -**Return value type**: DOUBLE. +**Description**: Similar to `PERCENTILE`, but a simulated result is returned -**Applicable data types**: Numeric types. +**Return value type**: DOUBLE -**Applicable table types**: table, STable. +**Applicable data types**: Numeric -**More explanations** +**Applicable table types**: standard tables and supertables +**Explanations**: - _P_ is in range [0,100], when _P_ is 0, the result is same as using function MIN; when _P_ is 100, the result is same as function MAX. -- **algo_type** can only be input as `default` or `t-digest`, if it's not specified `default` will be used, i.e. `apercentile(column_name, 50)` is same as `apercentile(column_name, 50, "default")`. -- If `default` is used, histogram based algorithm is used for calculation. If `t-digest` is used, `t-digest` sampling algorithm is used to calculate the result. 
- -**Nested query**: It can be used in both the outer query and inner query in a nested query. +- `algo_type` can only be input as `default` or `t-digest` Enter `default` to use a histogram-based algorithm. Enter `t-digest` to use the t-digest algorithm to calculate the approximation of the quantile. `default` is used by default. ### AVG -``` +```sql SELECT AVG(field_name) FROM tb_name [WHERE clause]; ``` -**Description**: Get the average value of a column in a table or STable. +**Description**: The average value of the specified fields. + +**Return value type**: DOUBLE -**Return value type**: DOUBLE. +**Applicable data types**: Numeric -**Applicable data types**: Numeric type. +**Applicable table types**: standard tables and supertables -**Applicable table types**: table, STable. ### COUNT -``` +```sql SELECT COUNT([*|field_name]) FROM tb_name [WHERE clause]; ``` -**Description**: Get the number of rows in a table or a super table. +**Description**: The number of records in the specified fields. -**Return value type**: BIGINT. +**Return value type**: BIGINT -**Applicable data types**: All data types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable. +**Applicable table types**: standard tables and supertables **More explanation**: -- Wildcard (\*) is used to represent all columns. If \* used `COUNT` function will get the total number of all rows. -- The number of non-NULL values will be returned if this function is used on a specific column. +- Wildcard (\*) is used to represent all columns. +If you input a specific column, the number of non-null values in the column is returned. 
+

### ELAPSED

-```mysql
-SELECT ELAPSED(field_name[, time_unit]) FROM { tb_name | stb_name } [WHERE clause] [INTERVAL(interval [, offset]) [SLIDING sliding]];
+```sql
+SELECT ELAPSED(ts_primary_key [, time_unit]) FROM { tb_name | stb_name } [WHERE clause] [INTERVAL(interval [, offset]) [SLIDING sliding]];
 ```

+**Description**:`elapsed` function can be used to calculate the continuous time length in which there is valid data. If it's used with `INTERVAL` clause, the returned result is the calculated time length within each time window. If it's used without `INTERVAL` clause, the returned result is the calculated time length within the specified time range. Please be noted that the return value of `elapsed` is the number of `time_unit` in the calculated time length.

-**Return value type**:DOUBLE.
+**Return value type**: Double if the input value is not NULL;

-**Applicable data type**:TIMESTAMP.
+**Applicable data types**: TIMESTAMP

-**Applicable tables**: table, STable, outter in nested query.
+**Applicable tables**: table, STable, outer in nested query

**Explanations**:
- `field_name` parameter can only be the first column of a table, i.e. timestamp primary key.
- The minimum value of `time_unit` is the time precision of the database. If `time_unit` is not specified, the time precision of the database is used as the default time unit. 
Time unit specified by `time_unit` can be:
+ 1b (nanoseconds), 1u (microseconds), 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), or 1w (weeks)
- It can be used with `INTERVAL` to get the time valid time length of each time window. Please be noted that the return value is same as the time window for all time windows except for the first and the last time window.
- `order by asc/desc` has no effect on the result.
- `group by tbname` must be used together when `elapsed` is used against a STable.
- `group by` must NOT be used together when `elapsed` is used against a table or sub table.
-- When used in nested query, it's only applicable when the inner query outputs an implicit timestamp column as the primary key. For example, `select elapsed(ts) from (select diff(value) from sub1)` is legal usage while `select elapsed(ts) from (select * from sub1)` is not.
-- It cannot be used with `leastsquares`, `diff`, `derivative`, `top`, `bottom`, `last_row`, `interp`.
+- When used in nested query, it's only applicable when the inner query outputs an implicit timestamp column as the primary key. For example, `select elapsed(ts) from (select diff(value) from sub1)` is legal usage while `select elapsed(ts) from (select * from sub1)` is not. In addition, because elapsed has a strict dependency on the timeline, a statement like `select elapsed(ts) from (select diff(value) from st group by tbname)` will return a meaningless result.
+- It can't be used with `leastsquares`, `diff`, `derivative`, `top`, `bottom`, `last_row`, `interp`.


### LEASTSQUARES

-```
+```sql
 SELECT LEASTSQUARES(field_name, start_val, step_val) FROM tb_name [WHERE clause];
 ```

 **Description**: The linear regression function of the specified column and the timestamp column (primary key), `start_val` is the initial value and `step_val` is the step value.

-**Return value type**: VARCHAR string in the format of "(slope, intercept)". 
+**Return value type**: A string in the format of "(slope, intercept)" + +**Applicable data types**: Numeric -**Applicable data types**: Numeric types. +**Applicable table types**: table only -**Applicable table types**: table only. ### SPREAD -``` +```sql SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**: The difference between the max and the min value of a specific column. +**Description**: The difference between the max and the min of a specific column -**Return value type**: DOUBLE. +**Return value type**: DOUBLE -**Applicable data types**: Numeric types. +**Applicable data types**: Integers and timestamps -**Applicable table types**: table, STable. +**Applicable table types**: standard tables and supertables -**More explanations**: Can be used on a column of TIMESTAMP type, the result time unit precision is same as the current database in use. ### STDDEV -``` +```sql SELECT STDDEV(field_name) FROM tb_name [WHERE clause]; ``` -**Description**: Standard deviation of a specific column in a table or STable. +**Description**: Standard deviation of a specific column in a table or STable -**Return value type**: DOUBLE. +**Return value type**: DOUBLE -**Applicable data types**: Numeric types. +**Applicable data types**: Numeric + +**Applicable table types**: standard tables and supertables -**Applicable table types**: table, STable. ### SUM -``` +```sql SELECT SUM(field_name) FROM tb_name [WHERE clause]; ``` -**Description**: The summation of values of a specific column in a table or STable. +**Description**: The sum of a specific column in a table or STable + +**Return value type**: DOUBLE or BIGINT -**Return value type**: DOUBLE. +**Applicable data types**: Numeric -**Applicable data types**: Numeric types. +**Applicable table types**: standard tables and supertables -**Applicable table types**: table, STable. 
### HYPERLOGLOG -``` +```sql SELECT HYPERLOGLOG(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**:The cardinal number of a specific column is returned by using hyperloglog algorithm. +**Description**: + The cardinal number of a specific column is returned by using hyperloglog algorithm. The benefit of using hyperloglog algorithm is that the memory usage is under control when the data volume is huge. + However, when the data volume is very small, the result may be not accurate, it's recommented to use `select count(data) from (select unique(col) as data from table)` in this case. + +**Return value type**: Integer -**Return value type**: INTEGER. +**Applicable data types**: Numeric -**Applicable data types**: All data types. +**Applicable table types**: standard tables and supertables -**More explanations**: The benefit of using hyperloglog algorithm is that the memory usage is under control when the data volume is huge. However, when the data volume is very small, the result may be not accurate, it's recommented to use `select count(data) from (select unique(col) as data from table)` in this case. ### HISTOGRAM -``` +```sql SELECT HISTOGRAM(field_name,bin_type, bin_description, normalized) FROM tb_name [WHERE clause]; ``` **Description**:Returns count of data points in user-specified ranges. -**Return value type**:DOUBLE or BIGINT, depends on normalized parameter settings. +**Return value type** If normalized is set to 1, a DOUBLE is returned; otherwise a BIGINT is returned -**Applicable data type**:Numerical types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable. 
+**Applicable table types**: table, STable **Explanations**: - - bin_type: parameter to indicate the bucket type, valid inputs are: "user_input", "linear_bin", "log_bin"。 -- bin_description: parameter to describe the rule to generate buckets,can be in the following JSON formats for each bin_type respectively: - - - "user_input": "[1, 3, 5, 7]": User specified bin values. - - - "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}" - "start" - bin starting point. - "width" - bin offset. - "count" - number of bins generated. - "infinity" - whether to add(-inf, inf)as start/end point in generated set of bins. - The above "linear_bin" descriptor generates a set of bins: [-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf]. +- bin_description: parameter to describe how to generate buckets, can be in the following JSON formats for each bin_type respectively: + - "user_input": "[1, 3, 5, 7]": + User specified bin values. + + - "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}" + "start" - bin starting point. "width" - bin offset. "count" - number of bins generated. "infinity" - whether to add(-inf, inf)as start/end point in generated set of bins. + The above "linear_bin" descriptor generates a set of bins: [-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf]. + + - "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}" + "start" - bin starting point. "factor" - exponential factor of bin offset. "count" - number of bins generated. "infinity" - whether to add(-inf, inf)as start/end point in generated range of bins. + The above "log_bin" descriptor generates a set of bins: [-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf]. +- normalized: setting to 1/0 to turn on/off result normalization. Valid values are 0 or 1. - - "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}" - "start" - bin starting point. - "factor" - exponential factor of bin offset. - "count" - number of bins generated. 
- "infinity" - whether to add(-inf, inf)as start/end point in generated range of bins. - The above "log_bin" descriptor generates a set of bins:[-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf]. - -- normalized: setting to 1/0 to turn on/off result normalization. ### PERCENTILE -``` +```sql SELECT PERCENTILE(field_name, P) FROM { tb_name } [WHERE clause]; ``` **Description**: The value whose rank in a specific column matches the specified percentage. If such a value matching the specified percentage doesn't exist in the column, an interpolation value will be returned. -**Return value type**: DOUBLE. +**Return value type**: DOUBLE -**Applicable data types**: Numeric types. +**Applicable column types**: Numeric -**Applicable table types**: table. +**Applicable table types**: table only **More explanations**: _P_ is in range [0,100], when _P_ is 0, the result is same as using function MIN; when _P_ is 100, the result is same as function MAX. -## Selector Functions -Selector functiosn choose one or more rows in the query result according to the semantics. You can specify to output primary timestamp column and other columns including tbname and tags so that you can easily know which rows the selected values belong to. +## Selection Functions + +Selection functions return one or more results depending on the semantics of the query. You can specify the timestamp column, tbname pseudocolumn, or tag columns to show which rows contain the selected value. ### BOTTOM -``` +```sql SELECT BOTTOM(field_name, K) FROM { tb_name | stb_name } [WHERE clause]; ``` **Description**: The least _k_ values of a specific column in a table or STable. If a value has multiple occurrences in the column but counting all of them in will exceed the upper limit _k_, then a part of them will be returned randomly. -**Return value type**: Same as the column being operated upon. +**Return value type**:Same as the data type of the column being operated upon -**Applicable data types**: Numeric types. 
+**Applicable data types**: Numeric -**Applicable table types**: table, STable. +**Applicable table types**: standard tables and supertables -**More explanations**: +**More explanation**: -- _k_ must be in range [1,100]. -- The timestamp associated with the selected values are returned too. -- Can't be used with `FILL`. +- _k_ must be in range [1,100] +- The timestamp associated with the selected values are returned too +- Can't be used with `FILL` ### FIRST -``` +```sql SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**: The first non-null value of a specific column in a table or STable. +**Description**: The first non-null value of a specific column in a table or STable -**Return value type**: Same as the column being operated upon. +**Return value type**:Same as the data type of the column being operated upon -**Applicable data types**: All data types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable. +**Applicable table types**: standard tables and supertables -**More explanations**: +**More explanation**: - FIRST(\*) can be used to get the first non-null value of all columns - NULL will be returned if all the values of the specified column are all NULL @@ -842,17 +845,17 @@ SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause]; ### INTERP -``` +```sql SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [ RANGE(timestamp1,timestamp2) ] [EVERY(interval)] [FILL ({ VALUE | PREV | NULL | LINEAR | NEXT})]; ``` **Description**: The value that matches the specified timestamp range is returned, if existing; or an interpolation value is returned. -**Return value type**: Same as the column being operated upon. +**Return value type**: Same as the column being operated upon -**Applicable data types**: Numeric data types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable, nested query. 
+**Applicable table types**: standard tables and supertables **More explanations** @@ -866,201 +869,208 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [ ### LAST -``` +```sql SELECT LAST(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**: The last non-NULL value of a specific column in a table or STable. +**Description**: The last non-NULL value of a specific column in a table or STable -**Return value type**: Same as the column being operated upon. +**Return value type**:Same as the data type of the column being operated upon -**Applicable data types**: All data types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable. +**Applicable table types**: standard tables and supertables -**More explanations**: +**More explanation**: - LAST(\*) can be used to get the last non-NULL value of all columns - If the values of a column in the result set are all NULL, NULL is returned for that column; if all columns in the result are all NULL, no result will be returned. - When it's used on a STable, if there are multiple values with the timestamp in the result set, one of them will be returned randomly and it's not guaranteed that the same value is returned if the same query is run multiple times. + ### LAST_ROW -``` +```sql SELECT LAST_ROW(field_name) FROM { tb_name | stb_name }; ``` -**Description**: The last row of a table or STable. +**Description**: The last row of a table or STable -**Return value type**: Same as the column being operated upon. +**Return value type**:Same as the data type of the column being operated upon -**Applicable data types**: All data type. +**Applicable data types**: Numeric -**Applicable table types**: table, STable. 
+**Applicable table types**: standard tables and supertables **More explanations**: -- When it's used against a STable, multiple rows with the same and largest timestamp may exist, in this case one of them is returned randomly and it's not guaranteed that the result is same if the query is run multiple times. -- Cannot be used with `INTERVAL`. +- When it's used on a STable, if there are multiple values with the timestamp in the result set, one of them will be returned randomly and it's not guaranteed that the same value is returned if the same query is run multiple times. +- Can't be used with `INTERVAL`. ### MAX -``` +```sql SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**: The maximum value of a specific column of a table or STable. +**Description**: The maximum value of a specific column of a table or STable + +**Return value type**:Same as the data type of the column being operated upon -**Return value type**: Same as the data type of the column being operated upon. +**Applicable data types**: Numeric -**Applicable data types**: Numeric types. +**Applicable table types**: standard tables and supertables -**Applicable table types**: table, STable. ### MIN -``` +```sql SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause]; ``` -**Description**: The minimum value of a specific column in a table or STable. +**Description**: The minimum value of a specific column in a table or STable + +**Return value type**:Same as the data type of the column being operated upon -**Return value type**: Same as the data type of the column being operated upon. +**Applicable data types**: Numeric -**Applicable data types**: Numeric types. +**Applicable table types**: standard tables and supertables -**Applicable table types**: table, STable. ### MODE -``` +```sql SELECT MODE(field_name) FROM tb_name [WHERE clause]; ``` **Description**:The value which has the highest frequency of occurrence. 
NULL is returned if there are multiple values which have highest frequency of occurrence. -**Return value type**:Same as the data type of the column being operated upon. +**Return value type**: Same as the input data -**Applicable data types**: All data types. +**Applicable data types**: Numeric + +**Applicable table types**: standard tables and supertables -**More explanations**:Considering the number of returned result set is unpredictable, it's suggested to limit the number of unique values to 100,000, otherwise error will be returned. ### SAMPLE ```sql - SELECT SAMPLE(field_name, K) FROM { tb_name | stb_name } [WHERE clause] +SELECT SAMPLE(field_name, K) FROM { tb_name | stb_name } [WHERE clause] ``` **Description**: _k_ sampling values of a specific column. The applicable range of _k_ is [1,1000]. -**Return value type**: Same as the column being operated. +**Return value type**: Same as the column being operated plus the associated timestamp -**Applicable data types**: All data types. +**Applicable data types**: Any data type except for tags of STable -**Applicable table types**: table, STable. +**Applicable nested query**: Inner query and Outer query -**Applicable nested query**: Inner query and Outer query. +**Applicable table types**: standard tables and supertables -**More explanations**: +**More explanations**: + +This function cannot be used in expression calculation. +- Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline -- Arithmetic operation cannot be operated on the result of `SAMPLE` function -- Must be used with `Partition by tbname` when it's used on a STable to force the result on each single timeline. ### TAIL -``` +```sql SELECT TAIL(field_name, k, offset_val) FROM {tb_name | stb_name} [WHERE clause]; ``` **Description**: The next _k_ rows are returned after skipping the last `offset_val` rows, NULL values are not ignored. `offset_val` is optional parameter. 
When it's not specified, the last _k_ rows are returned. When `offset_val` is used, the effect is same as `order by ts desc LIMIT k OFFSET offset_val`. -**Parameter value range**: k: [1,100] offset_val: [0,100]. +**Parameter value range**: k: [1,100] offset_val: [0,100] + +**Return value type**:Same as the data type of the column being operated upon -**Return value type**: Same as the column being operated upon. +**Applicable data types**: Any data type except for timestamp, i.e. the primary key + +**Applicable table types**: standard tables and supertables -**Applicable data types**: All data types. ### TOP -``` +```sql SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause]; ``` **Description**: The greatest _k_ values of a specific column in a table or STable. If a value has multiple occurrences in the column but counting all of them in will exceed the upper limit _k_, then a part of them will be returned randomly. -**Return value type**: Same as the column being operated upon. +**Return value type**:Same as the data type of the column being operated upon -**Applicable data types**: Numeric types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable. +**Applicable table types**: standard tables and supertables -**More explanations**: +**More explanation**: -- _k_ must be in range [1,100]. -- The timestamp associated with the selected values are returned too. -- Cannot be used with `FILL`. +- _k_ must be in range [1,100] +- The timestamp associated with the selected values are returned too +- Can't be used with `FILL` ### UNIQUE -``` +```sql SELECT UNIQUE(field_name) FROM {tb_name | stb_name} [WHERE clause]; ``` -**Description**: The values that occur the first time in the specified column. The effect is similar to `distinct` keyword, but it can also be used to match tags or timestamp. +**Description**: The values that occur the first time in the specified column. 
The effect is similar to `distinct` keyword, but it can also be used to match tags or timestamp. The first occurrence of a timestamp or tag is used. -**Return value type**: Same as the column or tag being operated upon. +**Return value type**:Same as the data type of the column being operated upon -**Applicable data types**: All data types. +**Applicable column types**: Any data types except for timestamp -**More explanations**: +**Applicable table types**: table, STable -- It can be used against table or STable, but can't be used together with time window, like `interval`, `state_window` or `session_window` . -- Considering the number of result sets is unpredictable, it's suggested to limit the distinct values under 100,000 to control the memory usage, otherwise error will be returned. -## Time-Series Specific Functions +## Time-Series Extensions -TDengine provides a set of time-series specific functions to better meet the requirements in querying time-series data. In general databases, similar functionalities can only be achieved with much more complex syntax and much worse performance. TDengine provides these functionalities in builtin functions so that the burden on user side is minimized. +TDengine includes extensions to standard SQL that are intended specifically for time-series use cases. The functions enabled by these extensions require complex queries to implement in general-purpose databases. By offering them as built-in extensions, TDengine reduces user workload. ### CSUM ```sql - SELECT CSUM(field_name) FROM { tb_name | stb_name } [WHERE clause] +SELECT CSUM(field_name) FROM { tb_name | stb_name } [WHERE clause] ``` **Description**: The cumulative sum of each row for a specific column. The number of output rows is same as that of the input rows. -**Return value type**: BIGINT for signed integer input types; UNSIGNED BIGINT for unsigned integer input types; DOUBLE for floating point input types. 
+**Return value type**: Long integer for integers; Double for floating points. uint64_t for unsigned integers -**Applicable data types**: Numeric types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable. +**Nested query**: It can be used in both the outer query and inner query in a nested query. -**Applicable nested query**: Inner query and Outer query. +**Applicable table types**: standard tables and supertables + +**More explanations**: + +- Arithmetic operation can't be performed on the result of `csum` function +- Can only be used with aggregate functions. This function can be used with supertables and standard tables. +- Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline -**More explanations**: -- Arithmetic operation cannot be performed on the result of `csum` function. -- Can only be used with aggregate functions. -- `Partition by tbname` must be used together on a STable to force the result on a single timeline. ### DERIVATIVE -``` +```sql SELECT DERIVATIVE(field_name, time_interval, ignore_negative) FROM tb_name [WHERE clause]; ``` **Description**: The derivative of a specific column. The time rage can be specified by parameter `time_interval`, the minimum allowed time range is 1 second (1s); the value of `ignore_negative` can be 0 or 1, 1 means negative values are ignored. -**Return value type**: DOUBLE. - -**Applicable data types**: Numeric types. +**Return value type**: DOUBLE -**Applicable table types**: table, STable. +**Applicable data types**: Numeric -**More explanations**: +**Applicable table types**: standard tables and supertables -- The number of result rows is the number of total rows in the time range subtracted by one, no output for the first row. +**More explanation**: + - It can be used together with `PARTITION BY tbname` against a STable. -- Can be used together with selection of relative columns. E.g. select \_rowts, DERIVATIVE() from. 
+- It can be used together with a selected column. For example: select \_rowts, DERIVATIVE() from. ### DIFF @@ -1070,159 +1080,159 @@ SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHER **Description**: The different of each row with its previous row for a specific column. `ignore_negative` can be specified as 0 or 1, the default value is 1 if it's not specified. `1` means negative values are ignored. -**Return value type**: Same as the column being operated upon. +**Return value type**:Same as the data type of the column being operated upon -**Applicable data types**: Numeric types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable. +**Applicable table types**: standard tables and supertables -**More explanations**: +**More explanation**: + +- The number of result rows is the number of rows subtracted by one, no output for the first row +- It can be used together with a selected column. For example: select \_rowts, DIFF() from. -- The number of result rows is the number of rows subtracted by one, no output for the first row. -- It can be used on STable with `PARTITION by tbname`. -- Can be used together with selection of relative columns. E.g. select \_rowts, DIFF() from. ### IRATE -``` +```sql SELECT IRATE(field_name) FROM tb_name WHERE clause; ``` **Description**: instantaneous rate on a specific column. The last two samples in the specified time range are used to calculate instantaneous rate. If the last sample value is smaller, then only the last sample value is used instead of the difference between the last two sample values. -**Return value type**: DOUBLE. +**Return value type**: DOUBLE -**Applicable data types**: Numeric types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable. +**Applicable table types**: standard tables and supertables -**More explanations**: - -- It can be used on stble with `PARTITION BY`, i.e. 
timelines generated by `PARTITION BY tbname` on a STable. ### MAVG ```sql - SELECT MAVG(field_name, K) FROM { tb_name | stb_name } [WHERE clause] +SELECT MAVG(field_name, K) FROM { tb_name | stb_name } [WHERE clause] ``` **Description**: The moving average of continuous _k_ values of a specific column. If the number of input rows is less than _k_, nothing is returned. The applicable range of _k_ is [1,1000]. -**Return value type**: DOUBLE. +**Return value type**: DOUBLE -**Applicable data types**: Numeric types. +**Applicable data types**: Numeric -**Applicable nested query**: Inner query and Outer query. +**Nested query**: It can be used in both the outer query and inner query in a nested query. -**Applicable table types**: table, STable. +**Applicable table types**: standard tables and supertables -**More explanations**: +**More explanations**: + +- Arithmetic operation can't be performed on the result of `MAVG`. +- Can only be used with data columns, can't be used with tags. - Can't be used with aggregate functions. +- Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline -- Arithmetic operation cannot be performed on the result of `MAVG`. -- Cannot be used with aggregate functions. -- Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline. ### STATECOUNT -``` +```sql SELECT STATECOUNT(field_name, oper, val) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**: The number of continuous rows satisfying the specified conditions for a specific column. If the specified condition is evaluated as true, the number is increased by 1; otherwise the number is reset to -1. If the input value is NULL, then the corresponding row is skipped. +**Description**: The number of continuous rows satisfying the specified conditions for a specific column. The result is shown as an extra column for each row. 
If the specified condition is evaluated as true, the number is increased by 1; otherwise the number is reset to -1. If the input value is NULL, then the corresponding row is skipped. **Applicable parameter values**: -- oper : Can be one of "LT" (lower than), "GT" (greater than), "LE" (lower than or euqal to), "GE" (greater than or equal to), "NE" (not equal to), "EQ" (equal to). -- val : Numeric types. +- oper : Can be one of `LT` (lower than), `GT` (greater than), `LE` (lower than or equal to), `GE` (greater than or equal to), `NE` (not equal to), `EQ` (equal to), the value is case insensitive +- val : Numeric types -**Return value type**: INTEGER. +**Return value type**: Integer -**Applicable data types**: Numeric types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable. +**Applicable nested query**: Outer query only -**Applicable nested query**: Outer query only. +**Applicable table types**: standard tables and supertables **More explanations**: -- Must be used together with `PARTITION BY tbname` when it's used on a STable to force the result into each single timeline. -- Cannot be used with window operation, like interval/state_window/session_window. +- Must be used together with `PARTITION BY tbname` when it's used on a STable to force the result into each single timeline +- Can't be used with window operation, like interval/state_window/session_window + ### STATEDURATION -``` +```sql SELECT stateDuration(field_name, oper, val, unit) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**: The length of time range in which all rows satisfy the specified condition for a specific column. The length for the first row that satisfies the condition is 0. Next, if the condition is evaluated as true for a row, the time interval between current row and its previous row is added up to the time range; otherwise the time range length is reset to -1. If the value of the column is NULL, the corresponding row is skipped. 
+**Description**: The length of time range in which all rows satisfy the specified condition for a specific column. The result is shown as an extra column for each row. The length for the first row that satisfies the condition is 0. Next, if the condition is evaluated as true for a row, the time interval between current row and its previous row is added up to the time range; otherwise the time range length is reset to -1. If the value of the column is NULL, the corresponding row is skipped. **Applicable parameter values**: -- oper : Can be one of "LT" (lower than), "GT" (greater than), "LE" (lower than or euqal to), "GE" (greater than or equal to), "NE" (not equal to), "EQ" (equal to). -- val : Numeric types. -- unit : The unit of time interval, can be: 1b(nanosecond), 1u(microsecond),1a(millisecond),1s(second),1m(minute),1h(hour),1d(day),1w(week). If not specified, default is same as the current database time precision in use. +- oper : Can be one of `LT` (lower than), `GT` (greater than), `LE` (lower than or equal to), `GE` (greater than or equal to), `NE` (not equal to), `EQ` (equal to), the value is case insensitive +- val : Numeric types +- unit: The unit of time interval. Enter one of the following options: 1b (nanoseconds), 1u (microseconds), 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), or 1w (weeks). If you do not enter a unit of time, the precision of the current database is used by default. -**Return value type**: INTEGER. +**Return value type**: Integer -**Applicable data types**: Numeric types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable. +**Applicable nested query**: Outer query only -**Applicable nested query**: Outer query only. +**Applicable table types**: standard tables and supertables **More explanations**: -- Must be used together with `PARTITION BY tbname` when it's used on a STable to force the result into each single timeline. 
-- Cannot be used with window operation, like interval/state_window/session_window. +- Must be used together with `PARTITION BY tbname` when it's used on a STable to force the result into each single timeline +- Can't be used with window operation, like interval/state_window/session_window + ### TWA -``` +```sql SELECT TWA(field_name) FROM tb_name WHERE clause; ``` -**Description**: Time weighted average on a specific column within a time range. +**Description**: Time weighted average on a specific column within a time range -**Return value type**: DOUBLE. +**Return value type**: DOUBLE -**Applicable data types**: Numeric types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable. +**Applicable table types**: standard tables and supertables -**More explanations**: +- Must be used together with `PARTITION BY tbname` to force the result into each single timeline. -- It can be used on stable with `PARTITION BY`, i.e. timelines generated by `PARTITION BY tbname` on a STable. ## System Information Functions ### DATABASE -``` +```sql SELECT DATABASE(); ``` -**Description**:Return the current database being used. If the user doesn't specify database when logon and doesn't use `USE` SQL command to switch the datbase, this function returns NULL. +**Description**: The current database. If no database is specified upon logging in and no database is specified with `USE` after login, NULL will be returned by `select database()`. + ### CLIENT_VERSION -``` +```sql SELECT CLIENT_VERSION(); ``` -**Description**:Return the client version. +**Description**: The client version. ### SERVER_VERSION -``` +```sql SELECT SERVER_VERSION(); ``` -**Description**:Returns the server version. +**Description**: The server version. ### SERVER_STATUS -``` +```sql SELECT SERVER_VERSION(); ``` -**Description**:Returns the server's status. +**Description**: The server status. 
diff --git a/docs/en/12-taos-sql/12-distinguished.md b/docs/en/12-taos-sql/12-distinguished.md index d2f7cf66b6..7390fe983f 100644 --- a/docs/en/12-taos-sql/12-distinguished.md +++ b/docs/en/12-taos-sql/12-distinguished.md @@ -1,60 +1,35 @@ --- -sidebar_label: Distinguished -title: Distinguished Query for Time Series Database +sidebar_label: Time-Series Extensions +title: Time-Series Extensions --- -Aggregation by time window is supported in TDengine. For example, in the case where temperature sensors report the temperature every seconds, the average temperature for every 10 minutes can be retrieved by performing a query with a time window. -Window related clauses are used to divide the data set to be queried into subsets and then aggregation is performed across the subsets. There are three kinds of windows: time window, status window, and session window. There are two kinds of time windows: sliding window and flip time/tumbling window. +As a purpose-built database for storing and processing time-series data, TDengine provides time-series-specific extensions to standard SQL. -## Time Window +These extensions include tag-partitioned queries and windowed queries. -The `INTERVAL` clause is used to generate time windows of the same time interval. The `SLIDING` parameter is used to specify the time step for which the time window moves forward. The query is performed on one time window each time, and the time window moves forward with time. When defining a continuous query, both the size of the time window and the step of forward sliding time need to be specified. As shown in the figure blow, [t0s, t0e] ,[t1s , t1e], [t2s, t2e] are respectively the time ranges of three time windows on which continuous queries are executed. The time step for which time window moves forward is marked by `sliding time`. Query, filter and aggregate operations are executed on each time window respectively. 
When the time step specified by `SLIDING` is same as the time interval specified by `INTERVAL`, the sliding time window is actually a flip time/tumbling window. - -![TDengine Database Time Window](./timewindow-1.webp) +## Tag-Partitioned Queries -`INTERVAL` and `SLIDING` should be used with aggregate functions and select functions. The SQL statement below is illegal because no aggregate or selection function is used with `INTERVAL`. +When you query a supertable, you may need to partition the supertable by tag and perform additional operations on a specific partition. In this case, you can use the following SQL clause: +```sql +PARTITION BY part_list ``` -SELECT * FROM temp_tb_1 INTERVAL(1m); -``` - -The time step specified by `SLIDING` cannot exceed the time interval specified by `INTERVAL`. The SQL statement below is illegal because the time length specified by `SLIDING` exceeds that specified by `INTERVAL`. - -``` -SELECT COUNT(*) FROM temp_tb_1 INTERVAL(1m) SLIDING(2m); -``` - -When the time length specified by `SLIDING` is the same as that specified by `INTERVAL`, the sliding window is actually a flip/tumbling window. The minimum time range specified by `INTERVAL` is 10 milliseconds (10a) prior to version 2.1.5.0. Since version 2.1.5.0, the minimum time range by `INTERVAL` can be 1 microsecond (1u). However, if the DB precision is millisecond, the minimum time range is 1 millisecond (1a). Please note that the `timezone` parameter should be configured to be the same value in the `taos.cfg` configuration file on client side and server side. - -## Status Window - -In case of using integer, bool, or string to represent the status of a device at any given moment, continuous rows with the same status belong to a status window. Once the status changes, the status window closes. As shown in the following figure, there are two status windows according to status, [2019-04-28 14:22:07,2019-04-28 14:22:10] and [2019-04-28 14:22:11,2019-04-28 14:22:12]. 
Status window is not applicable to STable for now. -![TDengine Database Status Window](./timewindow-3.webp) +part_list can be any scalar expression, such as a column, constant, scalar function, or a combination of the preceding items. -`STATE_WINDOW` is used to specify the column on which the status window will be based. For example: +A PARTITION BY clause with a tag is processed as follows: -``` -SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status); -``` - -## Session Window +- The PARTITION BY clause must occur after the WHERE clause and cannot be used with a JOIN clause. +- The PARTITION BY clause partitions the super table by the specified tag group, and the specified calculation is performed on each partition. The calculation performed is determined by the rest of the statement - a window clause, GROUP BY clause, or SELECT clause. +- You can use PARTITION BY together with a window clause or GROUP BY clause. In this case, the window or GROUP BY clause takes effect on every partition. For example, the following statement partitions the table by the location tag, performs downsampling over a 10 minute window, and returns the maximum value: ```sql -SELECT COUNT(*), FIRST(ts) FROM temp_tb_1 SESSION(ts, tol_val); +select max(current) from meters partition by location interval(10m) ``` -The primary key, i.e. timestamp, is used to determine which session window a row belongs to. If the time interval between two adjacent rows is within the time range specified by `tol_val`, they belong to the same session window; otherwise they belong to two different session windows. 
As shown in the figure below, if the limit of time interval for the session window is specified as 12 seconds, then the 6 rows in the figure constitutes 2 time windows, [2019-04-28 14:22:10,2019-04-28 14:22:30] and [2019-04-28 14:23:10,2019-04-28 14:23:30], because the time difference between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the time interval limit of 12 seconds. - -![TDengine Database Session Window](./timewindow-2.webp) - -If the time interval between two continuous rows are within the time interval specified by `tol_value` they belong to the same session window; otherwise a new session window is started automatically. Session window is not supported on STable for now. - -## More On Window Aggregate - -### Syntax +## Windowed Queries -The full syntax of aggregate by window is as follows: +Aggregation by time window is supported in TDengine. For example, in the case where temperature sensors report the temperature every second, the average temperature for every 10 minutes can be retrieved by performing a query with a time window. Window related clauses are used to divide the data set to be queried into subsets and then aggregation is performed across the subsets. There are three kinds of windows: time window, status window, and session window. There are two kinds of time windows: sliding window and flip time/tumbling window. 
The query syntax is as follows: ```sql SELECT function_list FROM tb_name @@ -63,27 +38,36 @@ SELECT function_list FROM tb_name [STATE_WINDOW(col)] [INTERVAL(interval [, offset]) [SLIDING sliding]] [FILL({NONE | VALUE | PREV | NULL | LINEAR | NEXT})] - -SELECT function_list FROM stb_name - [WHERE where_condition] - [INTERVAL(interval [, offset]) [SLIDING sliding]] - [FILL({NONE | VALUE | PREV | NULL | LINEAR | NEXT})] - [GROUP BY tags] ``` -### Restrictions +The following restrictions apply: + +### Restricted Functions - Aggregate functions and select functions can be used in `function_list`, with each function having only one output. For example COUNT, AVG, SUM, STDDEV, LEASTSQUARES, PERCENTILE, MIN, MAX, FIRST, LAST. Functions having multiple outputs, such as DIFF or arithmetic operations can't be used. - `LAST_ROW` can't be used together with window aggregate. - Scalar functions, like CEIL/FLOOR, can't be used with window aggregate. + +### Other Rules + +- The window clause must occur after the PARTITION BY clause and before the GROUP BY clause. It cannot be used with a GROUP BY clause. +- SELECT clauses on windows can contain only the following expressions: + - Constants + - Aggregate functions + - Expressions that include the preceding expressions. +- The window clause cannot be used with a GROUP BY clause. - `WHERE` clause can be used to specify the starting and ending time and other filter conditions -- `FILL` clause is used to specify how to fill when there is data missing in any window, including: - 1. NONE: No fill (the default fill mode) - 2. VALUE:Fill with a fixed value, which should be specified together, for example `FILL(VALUE, 1.23)` - 3. PREV:Fill with the previous non-NULL value, `FILL(PREV)` - 4. NULL:Fill with NULL, `FILL(NULL)` - 5. LINEAR:Fill with the closest non-NULL value, `FILL(LINEAR)` - 6. 
NEXT:Fill with the next non-NULL value, `FILL(NEXT)` + +### FILL Clause + +`FILL` clause is used to specify how to fill when there is data missing in any window, including: + +1. NONE: No fill (the default fill mode) +2. VALUE:Fill with a fixed value, which should be specified together, for example `FILL(VALUE, 1.23)` Note: The value filled depends on the data type. For example, if you run FILL(VALUE, 1.23) on an integer column, the value 1 is filled. +3. PREV:Fill with the previous non-NULL value, `FILL(PREV)` +4. NULL:Fill with NULL, `FILL(NULL)` +5. LINEAR:Fill with the closest non-NULL value, `FILL(LINEAR)` +6. NEXT:Fill with the next non-NULL value, `FILL(NEXT)` :::info @@ -93,17 +77,66 @@ SELECT function_list FROM stb_name ::: -Aggregate by time window is also used in continuous query, please refer to [Continuous Query](/develop/continuous-query). +### Time Window -## Examples +There are two kinds of time windows: sliding window and flip time/tumbling window. + +The `INTERVAL` clause is used to generate time windows of the same time interval. The `SLIDING` parameter is used to specify the time step for which the time window moves forward. The query is performed on one time window each time, and the time window moves forward with time. When defining a continuous query, both the size of the time window and the step of forward sliding time need to be specified. As shown in the figure below, [t0s, t0e] ,[t1s , t1e], [t2s, t2e] are respectively the time ranges of three time windows on which continuous queries are executed. The time step for which time window moves forward is marked by `sliding time`. Query, filter and aggregate operations are executed on each time window respectively. When the time step specified by `SLIDING` is same as the time interval specified by `INTERVAL`, the sliding time window is actually a flip time/tumbling window. 
+ +![TDengine Database Time Window](./timewindow-1.webp) + +`INTERVAL` and `SLIDING` should be used with aggregate functions and select functions. The SQL statement below is illegal because no aggregate or selection function is used with `INTERVAL`. + +``` +SELECT * FROM temp_tb_1 INTERVAL(1m); +``` + +The time step specified by `SLIDING` cannot exceed the time interval specified by `INTERVAL`. The SQL statement below is illegal because the time length specified by `SLIDING` exceeds that specified by `INTERVAL`. + +``` +SELECT COUNT(*) FROM temp_tb_1 INTERVAL(1m) SLIDING(2m); +``` + +When using time windows, note the following: + +- The window length for aggregation depends on the value of INTERVAL. The minimum interval is 10 ms. You can configure a window as an offset from UTC 0:00. The offset cannot be smaller than the interval. You can use SLIDING to specify the length of time that the window moves forward. +Please note that the `timezone` parameter should be configured to be the same value in the `taos.cfg` configuration file on client side and server side. +- The result set is in ascending order of timestamp when you aggregate by time window. + +### Status Window + +In case of using integer, bool, or string to represent the status of a device at any given moment, continuous rows with the same status belong to a status window. Once the status changes, the status window closes. As shown in the following figure, there are two status windows according to status, [2019-04-28 14:22:07,2019-04-28 14:22:10] and [2019-04-28 14:22:11,2019-04-28 14:22:12]. Status window is not applicable to STable for now. + +![TDengine Database Status Window](./timewindow-3.webp) + +`STATE_WINDOW` is used to specify the column on which the status window will be based. For example: + +``` +SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status); +``` + +### Session Window + +The primary key, i.e. timestamp, is used to determine which session window a row belongs to. 
As shown in the figure below, if the limit of time interval for the session window is specified as 12 seconds, then the 6 rows in the figure constitute 2 time windows, [2019-04-28 14:22:10,2019-04-28 14:22:30] and [2019-04-28 14:23:10,2019-04-28 14:23:30] because the time difference between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the time interval limit of 12 seconds. + +![TDengine Database Session Window](./timewindow-2.webp) + +If the time interval between two continuous rows is within the time interval specified by `tol_value` they belong to the same session window; otherwise a new session window is started automatically. Session window is not supported on STable for now. + +``` + +SELECT COUNT(*), FIRST(ts) FROM temp_tb_1 SESSION(ts, tol_val); +``` + +### Examples A table of intelligent meters can be created by the SQL statement below: -```sql +``` CREATE TABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT); ``` -The average current, maximum current and median of current in every 10 minutes for the past 24 hours can be calculated using the SQL statement below, with missing values filled with the previous non-NULL values. +The average current, maximum current and median of current in every 10 minutes for the past 24 hours can be calculated using the SQL statement below, with missing values filled with the previous non-NULL values. The query statement is as follows: ``` SELECT AVG(current), MAX(current), APERCENTILE(current, 50) FROM meters diff --git a/docs/en/12-taos-sql/13-tmq.md b/docs/en/12-taos-sql/13-tmq.md index 4d9c475a38..befab4f4f0 100644 --- a/docs/en/12-taos-sql/13-tmq.md +++ b/docs/en/12-taos-sql/13-tmq.md @@ -1,41 +1,34 @@ --- -sidebar_label: 消息队列 -title: 消息队列 +sidebar_label: Data Subscription +title: Data Subscription --- -TDengine 3.0.0.0 开始对消息队列做了大幅的优化和增强以简化用户的解决方案。 +The information in this document is related to the TDengine data subscription feature. 
-## 创建订阅主题 +## Create a Topic ```sql -CREATE TOPIC [IF NOT EXISTS] topic_name AS {subquery | DATABASE db_name | STABLE stb_name }; +CREATE TOPIC [IF NOT EXISTS] topic_name AS subquery; ``` -订阅主题包括三种:列订阅、超级表订阅和数据库订阅。 -**列订阅是**用 subquery 描述,支持过滤和标量函数和 UDF 标量函数,不支持 JOIN、GROUP BY、窗口切分子句、聚合函数和 UDF 聚合函数。列订阅规则如下: +You can use filtering, scalar functions, and user-defined scalar functions with a topic. JOIN, GROUP BY, windows, aggregate functions, and user-defined aggregate functions are not supported. The following rules apply to subscribing to a column: -1. TOPIC 一旦创建则返回结果的字段确定 -2. 被订阅或用于计算的列不可被删除、修改 -3. 列可以新增,但新增的列不出现在订阅结果字段中 -4. 对于 select \*,则订阅展开为创建时所有的列(子表、普通表为数据列,超级表为数据列加标签列) +1. The returned field is determined when the topic is created. +2. Columns to which a consumer is subscribed or that are involved in calculations cannot be deleted or modified. +3. If you add a column, the new column will not appear in the results for the subscription. +4. If you run `SELECT \*`, all columns in the subscription at the time of its creation are displayed. This includes columns in supertables, standard tables, and subtables. Supertables are shown as data columns plus tag columns. -**超级表订阅和数据库订阅**规则如下: -1. 被订阅主体的 schema 变更不受限 -2. 返回消息中 schema 是块级别的,每块的 schema 可能不一样 -3. 列变更后写入的数据若未落盘,将以写入时的 schema 返回 -4. 列变更后写入的数据若未已落盘,将以落盘时的 schema 返回 - -## 删除订阅主题 +## Delete a Topic ```sql DROP TOPIC [IF EXISTS] topic_name; ``` -此时如果该订阅主题上存在 consumer,则此 consumer 会收到一个错误。 +If a consumer is subscribed to the topic that you delete, the consumer will receive an error. -## 查看订阅主题 +## View Topics ## SHOW TOPICS @@ -43,24 +36,24 @@ DROP TOPIC [IF EXISTS] topic_name; SHOW TOPICS; ``` -显示当前数据库下的所有主题的信息。 +The preceding command displays all topics in the current database. -## 创建消费组 +## Create Consumer Group -消费组的创建只能通过 TDengine 客户端驱动或者连接器所提供的 API 创建。 +You can create consumer groups only through the TDengine Client driver or the API provided by a connector. 
-## 删除消费组 +## Delete Consumer Group ```sql DROP CONSUMER GROUP [IF EXISTS] cgroup_name ON topic_name; ``` -删除主题 topic_name 上的消费组 cgroup_name。 +This deletes the cgroup_name in the topic_name. -## 查看消费组 +## View Consumer Groups ```sql SHOW CONSUMERS; ``` -显示当前数据库下所有活跃的消费者的信息。 +The preceding command displays all consumer groups in the current database. diff --git a/docs/en/12-taos-sql/14-stream.md b/docs/en/12-taos-sql/14-stream.md index 7ff7da2bfb..fcd7876510 100644 --- a/docs/en/12-taos-sql/14-stream.md +++ b/docs/en/12-taos-sql/14-stream.md @@ -1,13 +1,13 @@ --- -sidebar_label: 流式计算 -title: 流式计算 +sidebar_label: Stream Processing +title: Stream Processing --- -在时序数据的处理中,经常要对原始数据进行清洗、预处理,再使用时序数据库进行长久的储存。用户通常需要在时序数据库之外再搭建 Kafka、Flink、Spark 等流计算处理引擎,增加了用户的开发成本和维护成本。 +Raw time-series data is often cleaned and preprocessed before being permanently stored in a database. Stream processing components like Kafka, Flink, and Spark are often deployed alongside a time-series database to handle these operations, increasing system complexity and maintenance costs. -使用 TDengine 3.0 的流式计算引擎能够最大限度的减少对这些额外中间件的依赖,真正将数据的写入、预处理、长期存储、复杂分析、实时计算、实时报警触发等功能融为一体,并且,所有这些任务只需要使用 SQL 完成,极大降低了用户的学习成本、使用成本。 +Because stream processing is built in to TDengine, you are no longer reliant on middleware. TDengine offers a unified platform for writing, preprocessing, permanent storage, complex analysis, and real-time computation and alerting. Additionally, you can use SQL to perform all these tasks. 
-## 创建流式计算 +## Create a Stream ```sql CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name AS subquery @@ -18,7 +18,7 @@ stream_options: { ``` -其中 subquery 是 select 普通查询语法的子集: +The subquery is a subset of standard SELECT query syntax: ```sql subquery: SELECT [DISTINCT] select_list @@ -26,97 +26,74 @@ subquery: SELECT [DISTINCT] select_list [WHERE condition] [PARTITION BY tag_list] [window_clause] - [group_by_clause] ``` -不支持 order_by,limit,slimit,fill 语句 +Session windows, state windows, and sliding windows are supported. When you configure a session or state window for a supertable, you must use PARTITION BY TBNAME. -例如,如下语句创建流式计算,同时自动创建名为 avg_vol 的超级表,此流计算以一分钟为时间窗口、30 秒为前向增量统计这些电表的平均电压,并将来自 meters 表的数据的计算结果写入 avg_vol 表,不同 partition 的数据会分别创建子表并写入不同子表。 +```sql +window_clause: { + SESSION(ts_col, tol_val) + | STATE_WINDOW(col) + | INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] +} +``` + +`SESSION` indicates a session window, and `tol_val` indicates the maximum range of the time interval. If the time interval between two continuous rows are within the time interval specified by `tol_val` they belong to the same session window; otherwise a new session window is started automatically. + +For example, the following SQL statement creates a stream and automatically creates a supertable named `avg_vol`. The stream has a 1 minute time window that slides forward in 30 second intervals to calculate the average voltage of the meters supertable. ```sql CREATE STREAM avg_vol_s INTO avg_vol AS SELECT _wstartts, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVAL(1m) SLIDING(30s); ``` -## 删除流式计算 +## Delete a Stream ```sql DROP STREAM [IF NOT EXISTS] stream_name ``` -仅删除流式计算任务,由流式计算写入的数据不会被删除。 +This statement deletes the stream processing service only. The data generated by the stream is retained. 
-## 展示流式计算 +## View Streams ```sql SHOW STREAMS; ``` -## 流式计算的触发模式 - -在创建流时,可以通过 TRIGGER 指令指定流式计算的触发模式。 - -对于非窗口计算,流式计算的触发是实时的;对于窗口计算,目前提供 3 种触发模式: - -1. AT_ONCE:写入立即触发 - -2. WINDOW_CLOSE:窗口关闭时触发(窗口关闭由事件时间决定,可配合 watermark 使用,详见《流式计算的乱序数据容忍策略》) - -3. MAX_DELAY time:若窗口关闭,则触发计算。若窗口未关闭,且未关闭时长超过 max delay 指定的时间,则触发计算。 - -由于窗口关闭是由事件时间决定的,如事件流中断、或持续延迟,则事件时间无法更新,可能导致无法得到最新的计算结果。 +## Trigger Stream Processing -因此,流式计算提供了以事件时间结合处理时间计算的 MAX_DELAY 触发模式。 +When you create a stream, you can use the TRIGGER parameter to specify triggering conditions for it. -MAX_DELAY 模式在窗口关闭时会立即触发计算。此外,当数据写入后,计算触发的时间超过 max delay 指定的时间,则立即触发计算 +For non-windowed processing, triggering occurs in real time. For windowed processing, there are three methods of triggering: -## 流式计算的乱序数据容忍策略 +1. AT_ONCE: triggers on write -在创建流时,可以在 stream_option 中指定 watermark。 +2. WINDOW_CLOSE: triggers when the window closes. This is determined by the event time. You can use WINDOW_CLOSE together with `watermark`. For more information, see Stream Processing Strategy for Out-of-Order Data. -流式计算通过 watermark 来度量对乱序数据的容忍程度,watermark 默认为 0。 +3. MAX_DELAY: triggers when the window closes. If the window has not closed but the time elapsed exceeds MAX_DELAY, stream processing is also triggered. -T = 最新事件时间 - watermark +Because the window closing is determined by the event time, a delay or termination of an event stream will prevent the event time from being updated. This may result in an inability to obtain the latest results. -每批到来的数据都会以上述公式更新窗口关闭时间,并将窗口结束时间 < T 的所有打开的窗口关闭,若触发模式为 WINDOW_CLOSE 或 MAX_DELAY,则推送窗口聚合结果。 +For this reason, MAX_DELAY is provided as a way to ensure that processing occurs even if the window does not close. -流式计算的过期数据处理策略 -对于已关闭的窗口,再次落入该窗口中的数据被标记为过期数据,对于过期数据,流式计算提供两种处理方式: +MAX_DELAY also triggers when the window closes. Additionally, if a write occurs but the processing is not triggered before MAX_DELAY expires, processing is also triggered. -1. 
直接丢弃:这是常见流式计算引擎提供的默认(甚至是唯一)计算模式 +## Stream Processing Strategy for Out-of-Order Data -2. 重新计算:从 TSDB 中重新查找对应窗口的所有数据并重新计算得到最新结果 +When you create a stream, you can specify a watermark in the `stream_option` parameter. -无论在哪种模式下,watermark 都应该被妥善设置,来得到正确结果(直接丢弃模式)或避免频繁触发重算带来的性能开销(重新计算模式)。 +The watermark is used to specify the tolerance for out-of-order data. The default value is 0. -## 流式计算的数据填充策略 +T = latest event time - watermark -TODO +The window closing time for each batch of data that arrives at the system is updated using the preceding formula, and all windows are closed whose closing time is less than T. If the triggering method is WINDOW_CLOSE or MAX_DELAY, the aggregate result for the window is pushed. -## 流式计算与会话窗口(session window) +Stream processing strategy for expired data +The data in expired windows is tagged as expired. TDengine stream processing provides two methods for handling such data: -```sql -window_clause: { - SESSION(ts_col, tol_val) - | STATE_WINDOW(col) - | INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [FILL(fill_mod_and_val)] -} -``` - -其中,SESSION 是会话窗口,tol_val 是时间间隔的最大范围。在 tol_val 时间间隔范围内的数据都属于同一个窗口,如果连续的两条数据的时间超过 tol_val,则自动开启下一个窗口。 +1. Drop the data. This is the default and often only handling method for most stream processing engines. -## 流式计算的监控与流任务分布查询 +2. Recalculate the data. In this method, all data in the window is reobtained from the database and recalculated. The latest results are then returned. -TODO - -## 流式计算的内存控制与存算分离 - -TODO - -## 流式计算的暂停与恢复 - -```sql -STOP STREAM stream_name; - -RESUME STREAM stream_name; -``` +In both of these methods, configuring the watermark is essential for obtaining accurate results (if expired data is dropped) and avoiding repeated triggers that affect system performance (if expired data is recalculated). 
diff --git a/docs/en/12-taos-sql/16-operators.md b/docs/en/12-taos-sql/16-operators.md index 0ca9ec4943..c426e28793 100644 --- a/docs/en/12-taos-sql/16-operators.md +++ b/docs/en/12-taos-sql/16-operators.md @@ -5,62 +5,62 @@ title: Operators ## Arithmetic Operators -| # | **Operator** | **Data Types** | **Description** | -| --- | :----------: | -------------- | --------------------------------------------------------- | -| 1 | +, - | Numeric Types | Representing positive or negative numbers, unary operator | -| 2 | +, - | Numeric Types | Addition and substraction, binary operator | -| 3 | \*, / | Numeric Types | Multiplication and division, binary oeprator | -| 4 | % | Numeric Types | Taking the remainder, binary operator | +| # | **Operator** | **Supported Data Types** | **Description** | +| --- | :--------: | -------------- | -------------------------- | +| 1 | +, - | Numeric | Expresses sign. Unary operators. | +| 2 | +, - | Numeric | Expresses addition and subtraction. Binary operators. | +| 3 | \*, / | Numeric | Expresses multiplication and division. Binary operators. | +| 4 | % | Numeric | Expresses modulo. Binary operator. | ## Bitwise Operators -| # | **Operator** | **Data Types** | **Description** | -| --- | :----------: | -------------- | ----------------------------- | -| 1 | & | Numeric Types | Bitewise AND, binary operator | -| 2 | \| | Numeric Types | Bitewise OR, binary operator | +| # | **Operator** | **Supported Data Types** | **Description** | +| --- | :--------: | -------------- | ------------------ | +| 1 | & | Numeric | Bitwise AND. Binary operator. | +| 2 | \| | Numeric | Bitwise OR. Binary operator. | -## JSON Operator +## JSON Operators -`->` operator can be used to get the value of a key in a column of JSON type, the left oeprand is the column name, the right operand is a string constant. For example, `col->'name'` returns the value of key `'name'`. +The `->` operator returns the value for a key in JSON column. 
Specify the column indicator on the left of the operator and the key name on the right of the operator. For example, `col->'name'` returns the value of the name key. -## Set Operator +## Set Operators -Set operators are used to combine the results of two queries into single result. A query including set operators is called a combined query. The number of rows in each result in a combined query must be same, and the type is determined by the first query's result, the type of the following queriess result must be able to be converted to the type of the first query's result, the conversion rule is same as `CAST` function. +Set operators combine the results of two queries. Queries that include set operators are known as compound queries. The expressions corresponding to each query in the select list in a compound query must match in number. The results returned take the data type of the first query, and the data type returned by subsequent queries must be convertible into the data type of the first query. The conditions of the `CAST` function apply to this conversion. -TDengine provides 2 set operators: `UNION ALL` and `UNION`. `UNION ALL` combines the results without removing duplicate data. `UNION` combines the results and remove duplicate data rows. In single SQL statement, at most 100 set operators can be used. +TDengine supports the `UNION` and `UNION ALL` operations. UNION ALL collects all query results and returns them as a composite result without deduplication. UNION collects all query results and returns them as a deduplicated composite result. In a single SQL statement, at most 100 set operators can be supported. 
-## Comparsion Operator +## Comparison Operators -| # | **Operator** | **Data Types** | **Description** | -| --- | :---------------: | ------------------------------------------------------------------- | ----------------------------------------------- | -| 1 | = | Except for BLOB, MEDIUMBLOB and JSON | Equal | -| 2 | <\>, != | Except for BLOB, MEDIUMBLOB, JSON and primary key of timestamp type | Not equal | -| 3 | \>, < | Except for BLOB, MEDIUMBLOB and JSON | Greater than, less than | -| 4 | \>=, <= | Except for BLOB, MEDIUMBLOB and JSON | Greater than or equal to, less than or equal to | -| 5 | IS [NOT] NULL | Any types | Is NULL or NOT | -| 6 | [NOT] BETWEEN AND | Except for BLOB, MEDIUMBLOB and JSON | In a value range or not | -| 7 | IN | Except for BLOB, MEDIUMBLOB, JSON and primary key of timestamp type | In a list of values or not | -| 8 | LIKE | BINARY, NCHAR and VARCHAR | Wildcard matching | -| 9 | MATCH, NMATCH | BINARY, NCHAR and VARCHAR | Regular expression matching | -| 10 | CONTAINS | JSON | If A key exists in JSON | +| # | **Operator** | **Supported Data Types** | **Description** | +| --- | :---------------: | -------------------------------------------------------------------- | -------------------- | +| 1 | = | All types except BLOB, MEDIUMBLOB, and JSON | Equal to | +| 2 | <\>, != | All types except BLOB, MEDIUMBLOB, and JSON; the primary key (timestamp) is also not supported | Not equal to | +| 3 | \>, < | All types except BLOB, MEDIUMBLOB, and JSON | Greater than and less than | +| 4 | \>=, <= | All types except BLOB, MEDIUMBLOB, and JSON | Greater than or equal to and less than or equal to | +| 5 | IS [NOT] NULL | All types | Indicates whether the value is null | +| 6 | [NOT] BETWEEN AND | All types except BLOB, MEDIUMBLOB, and JSON | Closed interval comparison | +| 7 | IN | All types except BLOB, MEDIUMBLOB, and JSON; the primary key (timestamp) is also not supported | Equal to any value in the list | +| 8 | LIKE | BINARY, NCHAR, and VARCHAR 
| Wildcard match | +| 9 | MATCH, NMATCH | BINARY, NCHAR, and VARCHAR | Regular expression match | +| 10 | CONTAINS | JSON | Indicates whether the key exists | -`LIKE` operator uses wildcard to match a string, the rules are: +LIKE is used together with wildcards to match strings. Its usage is described as follows: -- '%' matches 0 to any number of characters; '\_' matches any single ASCII character. -- \_ can be used to match a `_` in the string, i.e. using escape character backslash `\` -- Wildcard string is 100 bytes at most. Longer a wildcard string is, worse the performance of LIKE operator is. +- '%' matches 0 or any number of characters, '\_' matches any single ASCII character. +- `\_` is used to match the \_ in the string. +- The maximum length of wildcard string is 100 bytes. A very long wildcard string may slow down the execution performance of `LIKE` operator. -`MATCH` and `NMATCH` operators use regular expressions to match a string, the rules are: +MATCH and NMATCH are used together with regular expressions to match strings. Their usage is described as follows: -- Regular expressions of POSIX standard are supported. -- Only `tbname`, i.e. table name of sub tables, and tag columns of string types can be matched with regular expression, data columns are not supported. -- Regular expression string is 128 bytes at most, and can be adjusted by setting parameter `maxRegexStringLen`, which is a client side configuration and needs to restart the client to take effect. +- Use POSIX regular expression syntax. For more information, see Regular Expressions. +- Regular expression can be used against only table names, i.e. `tbname`, and tags of binary/nchar types, but can't be used against data columns. +- The maximum length of regular expression string is 128 bytes. Configuration parameter `maxRegexStringLen` can be used to set the maximum allowed regular expression. It's a configuration parameter on the client side, and will take effect after restarting the client. 
## Logical Operators -| # | **Operator** | **Data Types** | **Description** | -| --- | :----------: | -------------- | ---------------------------------------------------------------------------------------- | -| 1 | AND | BOOL | Logical AND, return TRUE if both conditions are TRUE; return FALSE if any one is FALSE. | -| 2 | OR | BOOL | Logical OR, return TRUE if any condition is TRUE; return FALSE if both are FALSE | +| # | **Operator** | **Supported Data Types** | **Description** | +| --- | :--------: | -------------- | --------------------------------------------------------------------------- | +| 1 | AND | BOOL | Logical AND; if both conditions are true, TRUE is returned; If either condition is false, FALSE is returned. +| 2 | OR | BOOL | Logical OR; if either condition is true, TRUE is returned; If both conditions are false, FALSE is returned. -TDengine uses shortcircut optimization when performing logical operations. For AND operator, if the first condition is evaluated to FALSE, then the second one is not evaluated. For OR operator, if the first condition is evaluated to TRUE, then the second one is not evaluated. +TDengine performs short-path optimization when calculating logical conditions. If the first condition for AND is false, FALSE is returned without calculating the second condition. If the first condition for OR is true, TRUE is returned without calculating the second condition diff --git a/docs/en/12-taos-sql/17-json.md b/docs/en/12-taos-sql/17-json.md index 7460a5e0ba..77f7743033 100644 --- a/docs/en/12-taos-sql/17-json.md +++ b/docs/en/12-taos-sql/17-json.md @@ -1,60 +1,64 @@ --- +sidebar_label: JSON Type title: JSON Type --- + ## Syntax 1. Tag of type JSON - ```sql - create STable s1 (ts timestamp, v1 int) tags (info json); + ``` + create stable s1 (ts timestamp, v1 int) tags (info json) - create table s1_1 using s1 tags ('{"k1": "v1"}'); + create table s1_1 using s1 tags ('{"k1": "v1"}') ``` 2. 
"->" Operator of JSON - ```sql - select * from s1 where info->'k1' = 'v1'; + ``` + select * from s1 where info->'k1' = 'v1' - select info->'k1' from s1; + select info->'k1' from s1 ``` 3. "contains" Operator of JSON - ```sql - select * from s1 where info contains 'k2'; + ``` + select * from s1 where info contains 'k2' - select * from s1 where info contains 'k1'; + select * from s1 where info contains 'k1' ``` ## Applicable Operations 1. When a JSON data type is used in `where`, `match/nmatch/between and/like/and/or/is null/is no null` can be used but `in` can't be used. - ```sql + ``` select * from s1 where info->'k1' match 'v*'; select * from s1 where info->'k1' like 'v%' and info contains 'k2'; select * from s1 where info is null; - select * from s1 where info->'k1' is not null; + select * from s1 where info->'k1' is not null ``` 2. A tag of JSON type can be used in `group by`, `order by`, `join`, `union all` and sub query; for example `group by json->'key'` 3. `Distinct` can be used with a tag of type JSON - ```sql - select distinct info->'k1' from s1; + ``` + select distinct info->'k1' from s1 ``` 4. Tag Operations The value of a JSON tag can be altered. Please note that the full JSON will be overriden when doing this. - The name of a JSON tag can be altered. A tag of JSON type can't be added or removed. The column length of a JSON tag can't be changed. + The name of a JSON tag can be altered. + + A tag of JSON type can't be added or removed. The column length of a JSON tag can't be changed. ## Other Restrictions @@ -64,19 +68,24 @@ title: JSON Type - JSON format: - - The input string for JSON can be empty, i.e. "", "\t", or NULL, but it can't be non-NULL string, bool or array. - - object can be {}, and the entire JSON is empty if so. Key can be "", and it's ignored if so. - - value can be int, double, string, bool or NULL, and it can't be an array. Nesting is not allowed which means that the value of a key can't be JSON. 
- - If one key occurs twice in JSON, only the first one is valid. - - Escape characters are not allowed in JSON. + - The input string for JSON can be empty, i.e. "", "\t", or NULL, but it can't be non-NULL string, bool or array. + - object can be {}, and the entire JSON is empty if so. Key can be "", and it's ignored if so. + - value can be int, double, string, bool or NULL, and it can't be an array. Nesting is not allowed which means that the value of a key can't be JSON. + - If one key occurs twice in JSON, only the first one is valid. + - Escape characters are not allowed in JSON. - NULL is returned when querying a key that doesn't exist in JSON. - If a tag of JSON is the result of inner query, it can't be parsed and queried in the outer query. -For example, the SQL statements below are not supported. + For example, the SQL statements below are not supported. -```sql; -select jtag->'key' from (select jtag from STable); -select jtag->'key' from (select jtag from STable) where jtag->'key'>0; -``` + ``` + select jtag->'key' from (select jtag from stable) + ``` + + and + + ``` + select jtag->'key' from (select jtag from stable) where jtag->'key'>0 + ``` diff --git a/docs/en/12-taos-sql/18-escape.md b/docs/en/12-taos-sql/18-escape.md index 34ce9f7848..46ab35a276 100644 --- a/docs/en/12-taos-sql/18-escape.md +++ b/docs/en/12-taos-sql/18-escape.md @@ -2,7 +2,7 @@ title: Escape Characters --- -Below table is the list of escape characters used in TDengine. +## Escape Characters | Escape Character | **Actual Meaning** | | :--------------: | ------------------------ | diff --git a/docs/en/12-taos-sql/19-limit.md b/docs/en/12-taos-sql/19-limit.md index ff552fc977..0486ea3094 100644 --- a/docs/en/12-taos-sql/19-limit.md +++ b/docs/en/12-taos-sql/19-limit.md @@ -1,59 +1,59 @@ --- -sidebar_label: 命名与边界限制 -title: 命名与边界限制 +sidebar_label: Name and Size Limits +title: Name and Size Limits --- -## 名称命名规则 +## Naming Rules -1. 合法字符:英文字符、数字和下划线 -2. 允许英文字符或下划线开头,不允许以数字开头 -3. 不区分大小写 -4. 
转义后表(列)名规则: - 为了兼容支持更多形式的表(列)名,TDengine 引入新的转义符 "`"。可用让表名与关键词不冲突,同时不受限于上述表名称合法性约束检查 - 转义后的表(列)名同样受到长度限制要求,且长度计算的时候不计算转义符。使用转义字符以后,不再对转义字符中的内容进行大小写统一 +1. Names can include letters, digits, and underscores (_). +2. Names can begin with letters or underscores (_) but not with digits. +3. Names are not case-sensitive. +4. Rules for names with escape characters are as follows: + You can escape a name by enclosing it in backticks (`). In this way, you can reuse keyword names for table names. However, the first three naming rules no longer apply. + Table and column names that are enclosed in escape characters are still subject to length limits. When the length of such a name is calculated, the escape characters are not included. Names specified using escape character are case-sensitive. - 例如:\`aBc\` 和 \`abc\` 是不同的表(列)名,但是 abc 和 aBc 是相同的表(列)名。 - 需要注意的是转义字符中的内容必须是可打印字符。 + For example, \`aBc\` and \`abc\` are different table or column names, but "abc" and "aBc" are same names because internally they are all "abc". + Only ASCII visible characters can be used with escape character. -## 密码合法字符集 +## Password Rules `[a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/]` -去掉了 `` ‘“`\ `` (单双引号、撇号、反斜杠、空格) +The following characters cannot occur in a password: single quotation marks ('), double quotation marks ("), backticks (`), backslashes (\\), and spaces. 
-## 一般限制 +## General Limits -- 数据库名最大长度为 32 -- 表名最大长度为 192,不包括数据库名前缀和分隔符 -- 每行数据最大长度 48KB (注意:数据行内每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置) -- 列名最大长度为 64 -- 最多允许 4096 列,最少需要 2 列,第一列必须是时间戳。 -- 标签名最大长度为 64 -- 最多允许 128 个,至少要有 1 个标签,一个表中标签值的总长度不超过 16KB -- SQL 语句最大长度 1048576 个字符,也可通过客户端配置参数 maxSQLLength 修改,取值范围 65480 ~ 1048576 -- SELECT 语句的查询结果,最多允许返回 4096 列(语句中的函数调用可能也会占用一些列空间),超限时需要显式指定较少的返回数据列,以避免语句执行报错 -- 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制 -- 数据库的副本数只能设置为 1 或 3 -- 用户名的最大长度是 23 个字节 -- 用户密码的最大长度是 15 个字节 -- 总数据行数取决于可用资源 -- 单个数据库的虚拟结点数上限为 1024 +- Maximum length of database name is 32 bytes +- Maximum length of table name is 192 bytes, excluding the database name prefix and the separator. +- Maximum length of each data row is 48K bytes. Note that the upper limit includes the extra 2 bytes consumed by each column of BINARY/NCHAR type. +- The maximum length of a column name is 64 bytes. +- Maximum number of columns is 4096. There must be at least 2 columns, and the first column must be timestamp. +- The maximum length of a tag name is 64 bytes +- Maximum number of tags is 128. There must be at least 1 tag. The total length of tag values cannot exceed 16 KB. +- Maximum length of single SQL statement is 1 MB (1048576 bytes). It can be configured in the parameter `maxSQLLength` in the client side, the applicable range is [65480, 1048576]. +- At most 4096 columns can be returned by `SELECT`. Functions in the query statement constitute columns. An error is returned if the limit is exceeded. +- Maximum numbers of databases, STables, tables are dependent only on the system resources. +- The number of replicas can only be 1 or 3. +- The maximum length of a username is 23 bytes. +- The maximum length of a password is 15 bytes. +- The maximum number of rows depends on system resources. +- The maximum number of vnodes in a database is 1024. 
-## 表(列)名合法性说明 +## Restrictions of Table/Column Names -### TDengine 中的表(列)名命名规则如下: +### Name Restrictions of Table/Column -只能由字母、数字、下划线构成,数字不能在首位,长度不能超过 192 字节,不区分大小写。这里表名称不包括数据库名的前缀和分隔符。 +The name of a table or column can only be composed of ASCII characters, digits and underscore and it cannot start with a digit. The maximum length is 192 bytes. Names are case insensitive. The name mentioned in this rule doesn't include the database name prefix and the separator. -### 转义后表(列)名规则: +### Name Restrictions After Escaping -为了兼容支持更多形式的表(列)名,TDengine 引入新的转义符 "`",可以避免表名与关键词的冲突,同时不受限于上述表名合法性约束检查,转义符不计入表名的长度。 -转义后的表(列)名同样受到长度限制要求,且长度计算的时候不计算转义符。使用转义字符以后,不再对转义字符中的内容进行大小写统一。 +To support more flexible table or column names, new escape character "\`" is introduced in TDengine to avoid the conflict between table name and keywords and break the above restrictions for table names. The escape character is not counted in the length of table name. +With escaping, the string inside escape characters are case sensitive, i.e. will not be converted to lower case internally. The table names specified using escape character are case sensitive. -例如: -\`aBc\` 和 \`abc\` 是不同的表(列)名,但是 abc 和 aBc 是相同的表(列)名。 +For example: +\`aBc\` and \`abc\` are different table or column names, but "abc" and "aBc" are same names because internally they are all "abc". :::note -转义字符中的内容必须是可打印字符。 +The characters inside escape characters must be printable characters. ::: diff --git a/docs/en/12-taos-sql/20-keywords.md b/docs/en/12-taos-sql/20-keywords.md index 6d40deb5a6..6f166c8034 100644 --- a/docs/en/12-taos-sql/20-keywords.md +++ b/docs/en/12-taos-sql/20-keywords.md @@ -1,10 +1,11 @@ --- -title: Keywords +sidebar_label: Reserved Keywords +title: Reserved Keywords --- -There are about 200 keywords reserved by TDengine, they can't be used as the name of database, STable or table with either upper case, lower case or mixed case. 
+## Keyword List -## Keywords List +There are about 200 keywords reserved by TDengine, they can't be used as the name of database, STable or table with either upper case, lower case or mixed case. The following list shows all reserved keywords: ### A @@ -57,70 +58,70 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam ### D -- DATABASE -- DATABASES -- DAYS -- DBS -- DEFERRED +- DATABASE +- DATABASES +- DAYS +- DBS +- DEFERRED - DELETE - DELIMITERS -- DESC -- DESCRIBE -- DETACH -- DISTINCT -- DIVIDE -- DNODE -- DNODES -- DOT -- DOUBLE -- DROP +- DESC +- DESCRIBE +- DETACH +- DISTINCT +- DIVIDE +- DNODE +- DNODES +- DOT +- DOUBLE +- DROP ### E -- END -- EQ -- EXISTS -- EXPLAIN +- END +- EQ +- EXISTS +- EXPLAIN ### F -- FAIL -- FILE -- FILL -- FLOAT -- FOR -- FROM -- FSYNC +- FAIL +- FILE +- FILL +- FLOAT +- FOR +- FROM +- FSYNC ### G -- GE -- GLOB +- GE +- GLOB - GRANTS -- GROUP -- GT +- GROUP +- GT ### H -- HAVING +- HAVING ### I - ID - IF -- IGNORE +- IGNORE - IMMEDIA -- IMPORT -- IN +- IMPORT +- IN - INITIAL -- INSERT +- INSERT - INSTEAD -- INT +- INT - INTEGER - INTERVA -- INTO -- IS -- ISNULL +- INTO +- IS +- ISNULL ### J @@ -129,187 +130,147 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam ### K - KEEP -- KEY +- KEY - KILL ### L -- LE -- LIKE -- LIMIT +- LE +- LIKE +- LIMIT - LINEAR -- LOCAL -- LP +- LOCAL +- LP - LSHIFT -- LT +- LT ### M -- MATCH -- MAXROWS -- MINROWS -- MINUS -- MNODES -- MODIFY -- MODULES +- MATCH +- MAXROWS +- MINROWS +- MINUS +- MNODES +- MODIFY +- MODULES ### N -- NE -- NONE -- NOT +- NE +- NONE +- NOT - NOTNULL -- NOW +- NOW - NULL ### O -- OF +- OF - OFFSET -- OR -- ORDER +- OR +- ORDER ### P - PARTITION -- PASS -- PLUS -- PPS +- PASS +- PLUS +- PPS - PRECISION -- PREV +- PREV - PRIVILEGE ### Q -- QTIME +- QTIME - QUERIE -- QUERY +- QUERY - QUORUM ### R -- RAISE -- REM +- RAISE +- REM - REPLACE - REPLICA -- RESET +- RESET - RESTRIC -- ROW -- RP +- ROW +- RP - RSHIFT ### 
S -- SCORES -- SELECT -- SEMI +- SCORES +- SELECT +- SEMI - SESSION -- SET -- SHOW -- SLASH +- SET +- SHOW +- SLASH - SLIDING -- SLIMIT +- SLIMIT - SMALLIN - SOFFSET -- STable +- STable - STableS -- STAR -- STATE +- STAR +- STATE - STATEMEN - STATE_WI -- STORAGE -- STREAM -- STREAMS -- STRING -- SYNCDB +- STORAGE +- STREAM +- STREAMS +- STRING +- SYNCDB ### T -- TABLE -- TABLES -- TAG -- TAGS -- TBNAME -- TIMES -- TIMESTAMP -- TINYINT -- TOPIC -- TOPICS -- TRIGGER -- TSERIES +- TABLE +- TABLES +- TAG +- TAGS +- TBNAME +- TIMES +- TIMESTAMP +- TINYINT +- TOPIC +- TOPICS +- TRIGGER +- TSERIES ### U -- UMINUS -- UNION -- UNSIGNED -- UPDATE -- UPLUS -- USE -- USER -- USERS -- USING +- UMINUS +- UNION +- UNSIGNED +- UPDATE +- UPLUS +- USE +- USER +- USERS +- USING ### V -- VALUES -- VARIABLE +- VALUES +- VARIABLE - VARIABLES -- VGROUPS -- VIEW -- VNODES +- VGROUPS +- VIEW +- VNODES ### W - WAL - WHERE -### _ - -- _C0 -- _QSTART -- _QSTOP -- _QDURATION -- _WSTART -- _WSTOP -- _WDURATION - -## Explanations -### TBNAME -`TBNAME` can be considered as a special tag, which represents the name of the subtable, in a STable. - -Get the table name and tag values of all subtables in a STable. -```mysql -SELECT TBNAME, location FROM meters; -``` - -Count the number of subtables in a STable. -```mysql -SELECT COUNT(TBNAME) FROM meters; -``` - -Only filter on TAGS can be used in WHERE clause in the above two query statements. -```mysql -taos> SELECT TBNAME, location FROM meters; - tbname | location | -================================================================== - d1004 | California.SanFrancisco | - d1003 | California.SanFrancisco | - d1002 | California.LosAngeles | - d1001 | California.LosAngeles | -Query OK, 4 row(s) in set (0.000881s) - -taos> SELECT COUNT(tbname) FROM meters WHERE groupId > 2; - count(tbname) | -======================== - 2 | -Query OK, 1 row(s) in set (0.001091s) -``` -### _QSTART/_QSTOP/_QDURATION -The start, stop and duration of a query time window. 
- -### _WSTART/_WSTOP/_WDURATION -The start, stop and duration of aggegate query by time window, like interval, session window, state window. - -### _c0/_ROWTS -_c0 is equal to _ROWTS, it means the first column of a table or STable. +### \_ + +- \_C0 +- \_QSTART +- \_QSTOP +- \_QDURATION +- \_WSTART +- \_WSTOP +- \_WDURATION diff --git a/docs/en/12-taos-sql/21-node.md b/docs/en/12-taos-sql/21-node.md index 4816daf420..8bb895f73c 100644 --- a/docs/en/12-taos-sql/21-node.md +++ b/docs/en/12-taos-sql/21-node.md @@ -1,37 +1,37 @@ --- -sidebar_label: 集群管理 -title: 集群管理 +sidebar_label: Cluster +title: Cluster --- -组成 TDengine 集群的物理实体是 dnode (data node 的缩写),它是一个运行在操作系统之上的进程。在 dnode 中可以建立负责时序数据存储的 vnode (virtual node),在多节点集群环境下当某个数据库的 replica 为 3 时,该数据库中的每个 vgroup 由 3 个 vnode 组成;当数据库的 replica 为 1 时,该数据库中的每个 vgroup 由 1 个 vnode 组成。如果要想配置某个数据库为多副本,则集群中的 dnode 数量至少为 3。在 dnode 还可以创建 mnode (management node),单个集群中最多可以创建三个 mnode。在 TDengine 3.0.0.0 中为了支持存算分离,引入了一种新的逻辑节点 qnode (query node),qnode 和 vnode 既可以共存在一个 dnode 中,也可以完全分离在不同的 dnode 上。 +The physical entities that form TDengine clusters are known as data nodes (dnodes). Each dnode is a process running on the operating system of the physical machine. Dnodes can contain virtual nodes (vnodes), which store time-series data. Virtual nodes are formed into vgroups, which have 1 or 3 vnodes depending on the replica setting. If you want to enable replication on your cluster, it must contain at least three nodes. Dnodes can also contain management nodes (mnodes). Each cluster has up to three mnodes. Finally, dnodes can contain query nodes (qnodes), which compute time-series data, thus separating compute from storage. A single dnode can contain a vnode, qnode, and mnode. -## 创建数据节点 +## Create a Dnode ```sql CREATE DNODE {dnode_endpoint | dnode_host_name PORT port_val} ``` -其中 `dnode_endpoint` 是形成 `hostname:port`的格式。也可以分开指定 hostname 和 port。 +Enter the dnode_endpoint in hostname:port format. 
You can also specify the hostname and port as separate parameters. -实际操作中推荐先创建 dnode,再启动相应的 dnode 进程,这样该 dnode 就可以立即根据其配置文件中的 firstEP 加入集群。每个 dnode 在加入成功后都会被分配一个 ID。 +Create the dnode before starting the corresponding dnode process. The dnode can then join the cluster based on the value of the firstEp parameter. Each dnode is assigned an ID after it joins a cluster. -## 查看数据节点 +## View Dnodes ```sql SHOW DNODES; ``` -可以列出集群中所有的数据节点,所列出的字段有 dnode 的 ID, endpoint, status。 +The preceding SQL command shows all dnodes in the cluster with the ID, endpoint, and status. -## 删除数据节点 +## Delete a DNODE ```sql DROP DNODE {dnode_id | dnode_endpoint} ``` -可以用 dnoe_id 或 endpoint 两种方式从集群中删除一个 dnode。注意删除 dnode 不等于停止相应的进程。实际中推荐先将一个 dnode 删除之后再停止其所对应的进程。 +You can delete a dnode by its ID or by its endpoint. Note that deleting a dnode does not stop its process. You must stop the process after the dnode is deleted. -## 修改数据节点配置 +## Modify Dnode Configuration ```sql ALTER DNODE dnode_id dnode_option @@ -62,59 +62,59 @@ dnode_option: { } ``` -上面语法中的这些可修改配置项其配置方式与 dnode 配置文件中的配置方式相同,区别是修改是动态的立即生效,且不需要重启 dnode。 +The parameters that you can modify through this statement are the same as those located in the dnode configuration file. Modifications that you make through this statement take effect immediately, while modifications to the configuration file take effect when the dnode restarts. -## 添加管理节点 +## Add an Mnode ```sql CREATE MNODE ON DNODE dnode_id ``` -系统启动默认在 firstEP 节点上创建一个 MNODE,用户可以使用此语句创建更多的 MNODE 来提高系统可用性。一个集群最多存在三个 MNODE,一个 DNODE 上只能创建一个 MNODE。 +TDengine automatically creates an mnode on the firstEp node. You can use this statement to create more mnodes for higher system availability. A cluster can have a maximum of three mnodes. Each dnode can contain only one mnode. -## 查看管理节点 +## View Mnodes ```sql SHOW MNODES; ``` -列出集群中所有的管理节点,包括其 ID,所在 DNODE 以及状态。 +This statement shows all mnodes in the cluster with the ID, dnode, and status. 
-## 删除管理节点
+## Delete an Mnode
 
 ```sql
 DROP MNODE ON DNODE dnode_id;
 ```
 
-删除 dnode_id 所指定的 DNODE 上的 MNODE。
+This statement deletes the mnode located on the specified dnode.
 
-## 创建查询节点
+## Create a Qnode
 
 ```sql
 CREATE QNODE ON DNODE dnode_id;
 ```
 
-系统启动默认没有 QNODE,用户可以创建 QNODE 来实现计算和存储的分离。一个 DNODE 上只能创建一个 QNODE。一个 DNODE 的 `supportVnodes` 参数如果不为 0,同时又在其上创建上 QNODE,则在该 dnode 中既有负责存储管理的 vnode 又有负责查询计算的 qnode,如果还在该 dnode 上创建了 mnode,则一个 dnode 上最多三种逻辑节点都可以存在。但通过配置也可以使其彻底分离。将一个 dnode 的`supportVnodes`配置为 0,可以选择在其上创建 mnode 或者 qnode 中的一种,这样可以实现三种逻辑节点在物理上的彻底分离。
+TDengine does not automatically create qnodes on startup. You can create qnodes as necessary for compute/storage separation. Each dnode can contain only one qnode. If a qnode is created on a dnode whose supportVnodes parameter is not 0, a vnode and qnode may coexist on the dnode. Each dnode can have a maximum of one vnode, one qnode, and one mnode. However, you can configure your cluster so that vnodes, qnodes, and mnodes are located on separate dnodes. If you set supportVnodes to 0 for a dnode, you can then decide whether to deploy an mnode or a qnode on it. In this way you can physically separate virtual node types.
 
-## 查看查询节点
+## View Qnodes
 
 ```sql
 SHOW QNODES;
 ```
 
-列出集群中所有查询节点,包括 ID,及所在 DNODE。
+This statement shows all qnodes in the cluster with the ID and dnode.
 
-## 删除查询节点
+## Delete a Qnode
 
 ```sql
 DROP QNODE ON DNODE dnode_id;
 ```
 
-删除 ID 为 dnode_id 的 DNODE 上的 QNODE,但并不会影响该 dnode 的状态。
+This statement deletes the qnode located on the specified dnode. This does not affect the status of the dnode.
 
-## 修改客户端配置
+## Modify Client Configuration
 
-如果将客户端也看作广义的集群的一部分,可以通过如下命令动态修改客户端配置参数。
+The client configuration can also be modified in a similar way to other cluster components.
 
 ```sql
 ALTER LOCAL local_option
@@ -129,26 +129,26 @@ local_option: {
 }
 ```
 
-上面语法中的参数与在配置文件中配置客户端的用法相同,但不需要重启客户端,修改后立即生效。
+The parameters that you can modify through this statement are the same as those located in the client configuration file. 
Modifications that you make through this statement take effect immediately, while modifications to the configuration file take effect when the client restarts.
 
-## 查看客户端配置
+## View Client Configuration
 
 ```sql
 SHOW LOCAL VARIABLES;
 ```
 
-## 合并 vgroup
+## Combine Vgroups
 
 ```sql
 MERGE VGROUP vgroup_no1 vgroup_no2;
 ```
 
-如果在系统实际运行一段时间后,因为不同时间线的数据特征不同导致在 vgroups 之间的数据和负载分布不均衡,可以通过合并或拆分 vgroups 的方式逐步实现负载均衡。
+If load and data are not properly balanced among vgroups due to the data in different time lines having different characteristics, you can combine or separate vgroups.
 
-## 拆分 vgroup
+## Separate Vgroups
 
 ```sql
 SPLIT VGROUP vgroup_no;
 ```
 
-会创建一个新的 vgroup,并将指定 vgroup 中的数据按照一致性 HASH 迁移一部分到新的 vgroup 中。此过程中,原 vgroup 可以正常提供读写服务。
+This statement creates a new vgroup and migrates part of the data from the original vgroup to the new vgroup with consistent hashing. During this process, the original vgroup can continue to provide services normally.
diff --git a/docs/en/12-taos-sql/22-meta.md b/docs/en/12-taos-sql/22-meta.md
index 1e17870685..796b25dcb0 100644
--- a/docs/en/12-taos-sql/22-meta.md
+++ b/docs/en/12-taos-sql/22-meta.md
@@ -1,247 +1,247 @@
 ---
-sidebar_label: 元数据库
-title: 元数据库
+sidebar_label: Metadata
+title: Information_Schema Database
 ---
 
-TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数据库元数据、数据库系统信息和状态的访问,例如数据库或表的名称,当前执行的 SQL 语句等。该数据库存储有关 TDengine 维护的所有其他数据库的信息。它包含多个只读表。实际上,这些表都是视图,而不是基表,因此没有与它们关联的文件。所以对这些表只能查询,不能进行 INSERT 等写入操作。`INFORMATION_SCHEMA` 数据库旨在以一种更一致的方式来提供对 TDengine 支持的各种 SHOW 语句(如 SHOW TABLES、SHOW DATABASES)所提供的信息的访问。与 SHOW 语句相比,使用 SELECT ... FROM INFORMATION_SCHEMA.tablename 具有以下优点:
+TDengine includes a built-in database named `INFORMATION_SCHEMA` to provide access to database metadata, system information, and status information. This information includes database names, table names, and currently running SQL statements. This database stores information about all other databases maintained by TDengine. It contains several read-only tables. 
These tables are more accurately described as views, and they do not correspond to specific files. You can query these tables but cannot write data to them. The INFORMATION_SCHEMA database is intended to provide a unified method for SHOW commands to access data. However, using SELECT ... FROM INFORMATION_SCHEMA.tablename offers several advantages over SHOW commands: -1. 可以使用 USE 语句将 INFORMATION_SCHEMA 设为默认数据库 -2. 可以使用 SELECT 语句熟悉的语法,只需要学习一些表名和列名 -3. 可以对查询结果进行筛选、排序等操作。事实上,可以使用任意 TDengine 支持的 SELECT 语句对 INFORMATION_SCHEMA 中的表进行查询 -4. TDengine 在后续演进中可以灵活的添加已有 INFORMATION_SCHEMA 中表的列,而不用担心对既有业务系统造成影响 -5. 与其他数据库系统更具互操作性。例如,Oracle 数据库用户熟悉查询 Oracle 数据字典中的表 +1. You can use a USE statement to specify the INFORMATION_SCHEMA database as the current database. +2. You can use the familiar SELECT syntax to access information, provided that you know the table and column names. +3. You can filter and order the query results. More generally, you can use any SELECT syntax that TDengine supports to query the INFORMATION_SCHEMA database. +4. Future versions of TDengine can add new columns to INFORMATION_SCHEMA tables without affecting existing business systems. +5. It is easier for users coming from other database management systems. For example, Oracle users can query data dictionary tables. -Note: 由于 SHOW 语句已经被开发者熟悉和广泛使用,所以它们仍然被保留。 +Note: SHOW statements are still supported for the convenience of existing users. -本章将详细介绍 `INFORMATION_SCHEMA` 这个内置元数据库中的表和表结构。 +This document introduces the tables of INFORMATION_SCHEMA and their structure. ## INS_DNODES -提供 dnode 的相关信息。也可以使用 SHOW DNODES 来查询这些信息。 +Provides information about dnodes. Similar to SHOW DNODES. 
-| # | **列名** | **数据类型** | **说明** |
+| # | **Column** | **Data Type** | **Description** |
 | --- | :------------: | ------------ | ------------------------- |
-| 1 | vnodes | SMALLINT | dnode 中的实际 vnode 个数 |
-| 2 | support_vnodes | SMALLINT | 最多支持的 vnode 个数 |
-| 3 | status | BINARY(10) | 当前状态 |
-| 4 | note | BINARY(256) | 离线原因等信息 |
-| 5 | id | SMALLINT | dnode id |
-| 6 | endpoint | BINARY(134) | dnode 的地址 |
-| 7 | create | TIMESTAMP | 创建时间 |
+| 1 | vnodes | SMALLINT | Current number of vnodes on the dnode |
+| 2 | support_vnodes | SMALLINT | Maximum number of vnodes on the dnode |
+| 3 | status | BINARY(10) | Current status |
+| 4 | note | BINARY(256) | Reason for going offline or other information |
+| 5 | id | SMALLINT | Dnode ID |
+| 6 | endpoint | BINARY(134) | Dnode endpoint |
+| 7 | create | TIMESTAMP | Creation time |
 
 ## INS_MNODES
 
-提供 mnode 的相关信息。也可以使用 SHOW MNODES 来查询这些信息。
+Provides information about mnodes. Similar to SHOW MNODES.
 
-| # | **列名** | **数据类型** | **说明** |
+| # | **Column** | **Data Type** | **Description** |
 | --- | :---------: | ------------ | ------------------ |
-| 1 | id | SMALLINT | mnode id |
-| 2 | endpoint | BINARY(134) | mnode 的地址 |
-| 3 | role | BINARY(10) | 当前角色 |
-| 4 | role_time | TIMESTAMP | 成为当前角色的时间 |
-| 5 | create_time | TIMESTAMP | 创建时间 |
+| 1 | id | SMALLINT | Mnode ID |
+| 2 | endpoint | BINARY(134) | Mnode endpoint |
+| 3 | role | BINARY(10) | Current role |
+| 4 | role_time | TIMESTAMP | Time at which the current role was assumed |
+| 5 | create_time | TIMESTAMP | Creation time |
 
 ## INS_MODULES
 
-提供组件的相关信息。也可以使用 SHOW MODULES 来查询这些信息
+Provides information about modules. Similar to SHOW MODULES. 
-| # | **列名** | **数据类型** | **说明** | +| # | **Column** | **Data Type** | **Description** | | --- | :------: | ------------ | ---------- | -| 1 | id | SMALLINT | module id | -| 2 | endpoint | BINARY(134) | 组件的地址 | -| 3 | module | BINARY(10) | 组件状态 | +| 1 | id | SMALLINT | Module ID | +| 2 | endpoint | BINARY(134) | Module endpoint | +| 3 | module | BINARY(10) | Module status | ## INS_QNODES -当前系统中 QNODE 的信息。也可以使用 SHOW QNODES 来查询这些信息。 +Provides information about qnodes. Similar to SHOW QNODES. -| # | **列名** | **数据类型** | **说明** | +| # | **Column** | **Data Type** | **Description** | | --- | :---------: | ------------ | ------------ | -| 1 | id | SMALLINT | qnode id | -| 2 | endpoint | BINARY(134) | qnode 的地址 | -| 3 | create_time | TIMESTAMP | 创建时间 | +| 1 | id | SMALLINT | Qnode ID | +| 2 | endpoint | BINARY(134) | Qnode endpoint | +| 3 | create_time | TIMESTAMP | Creation time | ## INS_CLUSTER -存储集群相关信息。 +Provides information about the cluster. -| # | **列名** | **数据类型** | **说明** | +| # | **Column** | **Data Type** | **Description** | | --- | :---------: | ------------ | ---------- | -| 1 | id | BIGINT | cluster id | -| 2 | name | BINARY(134) | 集群名称 | -| 3 | create_time | TIMESTAMP | 创建时间 | +| 1 | id | BIGINT | Cluster ID | +| 2 | name | BINARY(134) | Cluster name | +| 3 | create_time | TIMESTAMP | Creation time | ## INS_DATABASES -提供用户创建的数据库对象的相关信息。也可以使用 SHOW DATABASES 来查询这些信息。 +Provides information about user-created databases. Similar to SHOW DATABASES. 
-| # | **列名** | **数据类型** | **说明** | +| # | **Column** | **Data Type** | **Description** | | --- | :------------------: | ---------------- | ------------------------------------------------ | -| 1 | name | BINARY(32) | 数据库名 | -| 2 | create_time | TIMESTAMP | 创建时间 | -| 3 | ntables | INT | 数据库中表的数量,包含子表和普通表但不包含超级表 | -| 4 | vgroups | INT | 数据库中有多少个 vgroup | -| 6 | replica | INT | 副本数 | -| 7 | quorum | BINARY(3) | 强一致性 | -| 8 | duration | INT | 单文件存储数据的时间跨度 | -| 9 | keep | INT | 数据保留时长 | -| 10 | buffer | INT | 每个 vnode 写缓存的内存块大小,单位 MB | -| 11 | pagesize | INT | 每个 VNODE 中元数据存储引擎的页大小,单位为 KB | -| 12 | pages | INT | 每个 vnode 元数据存储引擎的缓存页个数 | -| 13 | minrows | INT | 文件块中记录的最大条数 | -| 14 | maxrows | INT | 文件块中记录的最小条数 | -| 15 | comp | INT | 数据压缩方式 | -| 16 | precision | BINARY(2) | 时间分辨率 | -| 17 | status | BINARY(10) | 数据库状态 | -| 18 | retention | BINARY (60) | 数据的聚合周期和保存时长 | -| 19 | single_stable | BOOL | 表示此数据库中是否只可以创建一个超级表 | -| 20 | cachemodel | BINARY(60) | 表示是否在内存中缓存子表的最近数据 | -| 21 | cachesize | INT | 表示每个 vnode 中用于缓存子表最近数据的内存大小 | -| 22 | wal_level | INT | WAL 级别 | -| 23 | wal_fsync_period | INT | 数据落盘周期 | -| 24 | wal_retention_period | INT | WAL 的保存时长 | -| 25 | wal_retention_size | INT | WAL 的保存上限 | -| 26 | wal_roll_period | INT | wal 文件切换时长 | -| 27 | wal_segment_size | wal 单个文件大小 | +| 1| name| BINARY(32)| Database name | +| 2 | create_time | TIMESTAMP | Creation time | +| 3 | ntables | INT | Number of standard tables and subtables (not including supertables) | +| 4 | vgroups | INT | Number of vgroups | +| 6 | replica | INT | Number of replicas | +| 7 | quorum | BINARY(3) | Strong consistency | +| 8 | duration | INT | Duration for storage of single files | +| 9 | keep | INT | Data retention period | +| 10 | buffer | INT | Write cache size per vnode, in MB | +| 11 | pagesize | INT | Page size for vnode metadata storage engine, in KB | +| 12 | pages | INT | Number of pages per vnode metadata storage engine | +| 13 | minrows | INT | Maximum number of records per file block | 
+| 14 | maxrows | INT | Maximum number of records per file block |
+| 15 | comp | INT | Compression method |
+| 16 | precision | BINARY(2) | Time precision |
+| 17 | status | BINARY(10) | Current database status |
+| 18 | retention | BINARY (60) | Aggregation interval and retention period |
+| 19 | single_stable | BOOL | Whether the database can contain only one supertable |
+| 20 | cachemodel | BINARY(60) | Caching method for the newest data |
+| 21 | cachesize | INT | Memory per vnode used for caching the newest data |
+| 22 | wal_level | INT | WAL level |
+| 23 | wal_fsync_period | INT | Interval at which WAL is written to disk |
+| 24 | wal_retention_period | INT | WAL retention period |
+| 25 | wal_retention_size | INT | Maximum WAL size |
+| 26 | wal_roll_period | INT | WAL rotation period |
+| 27 | wal_segment_size | WAL file size |
 
 ## INS_FUNCTIONS
 
-用户创建的自定义函数的信息。
+Provides information about user-defined functions.
 
-| # | **列名** | **数据类型** | **说明** |
+| # | **Column** | **Data Type** | **Description** |
 | --- | :---------: | ------------ | -------------- |
-| 1 | name | BINARY(64) | 函数名 |
-| 2 | comment | BINARY(255) | 补充说明 |
-| 3 | aggregate | INT | 是否为聚合函数 |
-| 4 | output_type | BINARY(31) | 输出类型 |
-| 5 | create_time | TIMESTAMP | 创建时间 |
-| 6 | code_len | INT | 代码长度 |
-| 7 | bufsize | INT | buffer 大小 |
+| 1 | name | BINARY(64) | Function name |
+| 2 | comment | BINARY(255) | Function description |
+| 3 | aggregate | INT | Whether the UDF is an aggregate function |
+| 4 | output_type | BINARY(31) | Output data type |
+| 5 | create_time | TIMESTAMP | Creation time |
+| 6 | code_len | INT | Length of the source code |
+| 7 | bufsize | INT | Buffer size |
 
 ## INS_INDEXES
 
-提供用户创建的索引的相关信息。也可以使用 SHOW INDEX 来查询这些信息。
+Provides information about user-created indices. Similar to SHOW INDEX. 
-| # | **列名** | **数据类型** | **说明** |
+| # | **Column** | **Data Type** | **Description** |
 | --- | :--------------: | ------------ | ---------------------------------------------------------------------------------- |
-| 1 | db_name | BINARY(32) | 包含此索引的表所在的数据库名 |
-| 2 | table_name | BINARY(192) | 包含此索引的表的名称 |
-| 3 | index_name | BINARY(192) | 索引名 |
-| 4 | column_name | BINARY(64) | 建索引的列的列名 |
-| 5 | index_type | BINARY(10) | 目前有 SMA 和 FULLTEXT |
-| 6 | index_extensions | BINARY(256) | 索引的额外信息。对 SMA 类型的索引,是函数名的列表。对 FULLTEXT 类型的索引为 NULL。 |
+| 1 | db_name | BINARY(32) | Database containing the table with the specified index |
+| 2 | table_name | BINARY(192) | Table containing the specified index |
+| 3 | index_name | BINARY(192) | Index name |
+| 4 | column_name | BINARY(64) | Indexed column |
+| 5 | index_type | BINARY(10) | SMA or FULLTEXT index |
+| 6 | index_extensions | BINARY(256) | Other information. For SMA indices, this shows a list of functions. For FULLTEXT indices, this is null. |
 
 ## INS_STABLES
 
-提供用户创建的超级表的相关信息。
+Provides information about supertables. 
-| # | **列名** | **数据类型** | **说明** |
+| # | **Column** | **Data Type** | **Description** |
 | --- | :-----------: | ------------ | ------------------------ |
-| 1 | stable_name | BINARY(192) | 超级表表名 |
-| 2 | db_name | BINARY(64) | 超级表所在的数据库的名称 |
-| 3 | create_time | TIMESTAMP | 创建时间 |
-| 4 | columns | INT | 列数目 |
-| 5 | tags | INT | 标签数目 |
-| 6 | last_update | TIMESTAMP | 最后更新时间 |
-| 7 | table_comment | BINARY(1024) | 表注释 |
-| 8 | watermark | BINARY(64) | 窗口的关闭时间 |
-| 9 | max_delay | BINARY(64) | 推送计算结果的最大延迟 |
-| 10 | rollup | BINARY(128) | rollup 聚合函数 |
+| 1 | stable_name | BINARY(192) | Supertable name |
+| 2 | db_name | BINARY(64) | Database containing the supertable |
+| 3 | create_time | TIMESTAMP | Creation time |
+| 4 | columns | INT | Number of columns |
+| 5 | tags | INT | Number of tags |
+| 6 | last_update | TIMESTAMP | Last updated time |
+| 7 | table_comment | BINARY(1024) | Table description |
+| 8 | watermark | BINARY(64) | Window closing time |
+| 9 | max_delay | BINARY(64) | Maximum delay for pushing stream processing results |
+| 10 | rollup | BINARY(128) | Rollup aggregate function |
 
 ## INS_TABLES
 
-提供用户创建的普通表和子表的相关信息
+Provides information about standard tables and subtables. 
-| # | **列名** | **数据类型** | **说明** | +| # | **Column** | **Data Type** | **Description** | | --- | :-----------: | ------------ | ---------------- | -| 1 | table_name | BINARY(192) | 表名 | -| 2 | db_name | BINARY(64) | 数据库名 | -| 3 | create_time | TIMESTAMP | 创建时间 | -| 4 | columns | INT | 列数目 | -| 5 | stable_name | BINARY(192) | 所属的超级表表名 | -| 6 | uid | BIGINT | 表 id | -| 7 | vgroup_id | INT | vgroup id | -| 8 | ttl | INT | 表的生命周期 | -| 9 | table_comment | BINARY(1024) | 表注释 | -| 10 | type | BINARY(20) | 表类型 | +| 1 | table_name | BINARY(192) | Table name | +| 2 | db_name | BINARY(64) | Database name | +| 3 | create_time | TIMESTAMP | Creation time | +| 4 | columns | INT | Number of columns | +| 5 | stable_name | BINARY(192) | Supertable name | +| 6 | uid | BIGINT | Table ID | +| 7 | vgroup_id | INT | Vgroup ID | +| 8 | ttl | INT | Table time-to-live | +| 9 | table_comment | BINARY(1024) | Table description | +| 10 | type | BINARY(20) | Table type | ## INS_TAGS -| # | **列名** | **数据类型** | **说明** | +| # | **Column** | **Data Type** | **Description** | | --- | :---------: | ------------- | ---------------------- | -| 1 | table_name | BINARY(192) | 表名 | -| 2 | db_name | BINARY(64) | 该表所在的数据库的名称 | -| 3 | stable_name | BINARY(192) | 所属的超级表表名 | -| 4 | tag_name | BINARY(64) | tag 的名称 | -| 5 | tag_type | BINARY(64) | tag 的类型 | -| 6 | tag_value | BINARY(16384) | tag 的值 | +| 1 | table_name | BINARY(192) | Table name | +| 2 | db_name | BINARY(64) | Database name | +| 3 | stable_name | BINARY(192) | Supertable name | +| 4 | tag_name | BINARY(64) | Tag name | +| 5 | tag_type | BINARY(64) | Tag type | +| 6 | tag_value | BINARY(16384) | Tag value | ## INS_USERS -提供系统中创建的用户的相关信息。 +Provides information about TDengine users. 
-| # | **列名** | **数据类型** | **说明** |
+| # | **Column** | **Data Type** | **Description** |
 | --- | :---------: | ------------ | -------- |
-| 1 | user_name | BINARY(23) | 用户名 |
-| 2 | privilege | BINARY(256) | 权限 |
-| 3 | create_time | TIMESTAMP | 创建时间 |
+| 1 | user_name | BINARY(23) | User name |
+| 2 | privilege | BINARY(256) | User permissions |
+| 3 | create_time | TIMESTAMP | Creation time |
 
 ## INS_GRANTS
 
-提供企业版授权的相关信息。
+Provides information about TDengine Enterprise Edition permissions.
 
-| # | **列名** | **数据类型** | **说明** |
+| # | **Column** | **Data Type** | **Description** |
 | --- | :---------: | ------------ | -------------------------------------------------- |
-| 1 | version | BINARY(9) | 企业版授权说明:official(官方授权的)/trial(试用的) |
-| 2 | cpu_cores | BINARY(9) | 授权使用的 CPU 核心数量 |
-| 3 | dnodes | BINARY(10) | 授权使用的 dnode 节点数量 |
-| 4 | streams | BINARY(10) | 授权创建的流数量 |
-| 5 | users | BINARY(10) | 授权创建的用户数量 |
-| 6 | accounts | BINARY(10) | 授权创建的帐户数量 |
-| 7 | storage | BINARY(21) | 授权使用的存储空间大小 |
-| 8 | connections | BINARY(21) | 授权使用的客户端连接数量 |
-| 9 | databases | BINARY(11) | 授权使用的数据库数量 |
-| 10 | speed | BINARY(9) | 授权使用的数据点每秒写入数量 |
-| 11 | querytime | BINARY(9) | 授权使用的查询总时长 |
-| 12 | timeseries | BINARY(21) | 授权使用的测点数量 |
-| 13 | expired | BINARY(5) | 是否到期,true:到期,false:未到期 |
-| 14 | expire_time | BINARY(19) | 试用期到期时间 |
+| 1 | version | BINARY(9) | Whether the deployment is a licensed or trial version |
+| 2 | cpu_cores | BINARY(9) | CPU cores included in license |
+| 3 | dnodes | BINARY(10) | Dnodes included in license |
+| 4 | streams | BINARY(10) | Streams included in license |
+| 5 | users | BINARY(10) | Users included in license |
+| 6 | accounts | BINARY(10) | Accounts included in license |
+| 7 | storage | BINARY(21) | Storage space included in license |
+| 8 | connections | BINARY(21) | Client connections included in license |
+| 9 | databases | BINARY(11) | Databases included in license |
+| 10 | speed | BINARY(9) | Write speed specified in license (data points 
per second) | +| 11 | querytime | BINARY(9) | Total query time specified in license | +| 12 | timeseries | BINARY(21) | Number of metrics included in license | +| 13 | expired | BINARY(5) | Whether the license has expired | +| 14 | expire_time | BINARY(19) | When the trial period expires | ## INS_VGROUPS -系统中所有 vgroups 的信息。 +Provides information about vgroups. -| # | **列名** | **数据类型** | **说明** | +| # | **Column** | **Data Type** | **Description** | | --- | :-------: | ------------ | ------------------------------------------------------ | -| 1 | vgroup_id | INT | vgroup id | -| 2 | db_name | BINARY(32) | 数据库名 | -| 3 | tables | INT | 此 vgroup 内有多少表 | -| 4 | status | BINARY(10) | 此 vgroup 的状态 | -| 5 | v1_dnode | INT | 第一个成员所在的 dnode 的 id | -| 6 | v1_status | BINARY(10) | 第一个成员的状态 | -| 7 | v2_dnode | INT | 第二个成员所在的 dnode 的 id | -| 8 | v2_status | BINARY(10) | 第二个成员的状态 | -| 9 | v3_dnode | INT | 第三个成员所在的 dnode 的 id | -| 10 | v3_status | BINARY(10) | 第三个成员的状态 | -| 11 | nfiles | INT | 此 vgroup 中数据/元数据文件的数量 | -| 12 | file_size | INT | 此 vgroup 中数据/元数据文件的大小 | -| 13 | tsma | TINYINT | 此 vgroup 是否专用于 Time-range-wise SMA,1: 是, 0: 否 | +| 1 | vgroup_id | INT | Vgroup ID | +| 2 | db_name | BINARY(32) | Database name | +| 3 | tables | INT | Tables in vgroup | +| 4 | status | BINARY(10) | Vgroup status | +| 5 | v1_dnode | INT | Dnode ID of first vgroup member | +| 6 | v1_status | BINARY(10) | Status of first vgroup member | +| 7 | v2_dnode | INT | Dnode ID of second vgroup member | +| 8 | v2_status | BINARY(10) | Status of second vgroup member | +| 9 | v3_dnode | INT | Dnode ID of third vgroup member | +| 10 | v3_status | BINARY(10) | Status of third vgroup member | +| 11 | nfiles | INT | Number of data and metadata files in the vgroup | +| 12 | file_size | INT | Size of the data and metadata files in the vgroup | +| 13 | tsma | TINYINT | Whether time-range-wise SMA is enabled. 1 means enabled; 0 means disabled. | ## INS_CONFIGS -系统配置参数。 +Provides system configuration information. 
-| # | **列名** | **数据类型** | **说明** | +| # | **Column** | **Data Type** | **Description** | | --- | :------: | ------------ | ------------ | -| 1 | name | BINARY(32) | 配置项名称 | -| 2 | value | BINARY(64) | 该配置项的值 | +| 1 | name | BINARY(32) | Parameter | +| 2 | value | BINARY(64) | Value | ## INS_DNODE_VARIABLES -系统中每个 dnode 的配置参数。 +Provides dnode configuration information. -| # | **列名** | **数据类型** | **说明** | +| # | **Column** | **Data Type** | **Description** | | --- | :------: | ------------ | ------------ | -| 1 | dnode_id | INT | dnode 的 ID | -| 2 | name | BINARY(32) | 配置项名称 | -| 3 | value | BINARY(64) | 该配置项的值 | +| 1 | dnode_id | INT | Dnode ID | +| 2 | name | BINARY(32) | Parameter | +| 3 | value | BINARY(64) | Value | diff --git a/docs/en/12-taos-sql/23-perf.md b/docs/en/12-taos-sql/23-perf.md new file mode 100644 index 0000000000..10a9338022 --- /dev/null +++ b/docs/en/12-taos-sql/23-perf.md @@ -0,0 +1,129 @@ +--- +sidebar_label: Statistics +title: Performance_Schema Database +--- + +TDengine includes a built-in database named `PERFORMANCE_SCHEMA` to provide access to database performance statistics. This document introduces the tables of PERFORMANCE_SCHEMA and their structure. + +## PERF_APP + +Provides information about clients (such as applications) that connect to the cluster. Similar to SHOW APPS. 
+ +| # | **Column** | **Data Type** | **Description** | +| --- | :----------: | ------------ | ------------------------------- | +| 1 | app_id | UBIGINT | Client ID | +| 2 | ip | BINARY(16) | Client IP address | +| 3 | pid | INT | Client process | +| 4 | name | BINARY(24) | Client name | +| 5 | start_time | TIMESTAMP | Time when client was started | +| 6 | insert_req | UBIGINT | Insert requests | +| 7 | insert_row | UBIGINT | Rows inserted | +| 8 | insert_time | UBIGINT | Time spent processing insert requests in microseconds | +| 9 | insert_bytes | UBIGINT | Size of data inserted in bytes | +| 10 | fetch_bytes | UBIGINT | Size of query results in bytes | +| 11 | query_time | UBIGINT | Time spent processing query requests | +| 12 | slow_query | UBIGINT | Number of slow queries (greater than or equal to 3 seconds) | +| 13 | total_req | UBIGINT | Total requests | +| 14 | current_req | UBIGINT | Requests currently being processed | +| 15 | last_access | TIMESTAMP | Last update time | + +## PERF_CONNECTIONS + +Provides information about connections to the database. Similar to SHOW CONNECTIONS. + +| # | **Column** | **Data Type** | **Description** | +| --- | :---------: | ------------ | -------------------------------------------------- | +| 1 | conn_id | INT | Connection ID | +| 2 | user | BINARY(24) | User name | +| 3 | app | BINARY(24) | Client name | +| 4 | pid | UINT | Client process ID on client device that initiated the connection | +| 5 | end_point | BINARY(128) | Client endpoint | +| 6 | login_time | TIMESTAMP | Login time | +| 7 | last_access | TIMESTAMP | Last update time | + +## PERF_QUERIES + +Provides information about SQL queries currently running. Similar to SHOW QUERIES. 
+ +| # | **Column** | **Data Type** | **Description** | +| --- | :----------: | ------------ | ---------------------------- | +| 1 | kill_id | UBIGINT | ID used to stop the query | +| 2 | query_id | INT | Query ID | +| 3 | conn_id | UINT | Connection ID | +| 4 | app | BINARY(24) | Client name | +| 5 | pid | INT | Client process ID on client device | +| 6 | user | BINARY(24) | User name | +| 7 | end_point | BINARY(16) | Client endpoint | +| 8 | create_time | TIMESTAMP | Creation time | +| 9 | exec_usec | BIGINT | Elapsed time | +| 10 | stable_query | BOOL | Whether the query is on a supertable | +| 11 | sub_num | INT | Number of subqueries | +| 12 | sub_status | BINARY(1000) | Subquery status | +| 13 | sql | BINARY(1024) | SQL statement | + +## PERF_TOPICS + +| # | **Column** | **Data Type** | **Description** | +| --- | :---------: | ------------ | ------------------------------ | +| 1 | topic_name | BINARY(192) | Topic name | +| 2 | db_name | BINARY(64) | Database for the topic | +| 3 | create_time | TIMESTAMP | Creation time | +| 4 | sql | BINARY(1024) | SQL statement used to create the topic | + +## PERF_CONSUMERS + +| # | **Column** | **Data Type** | **Description** | +| --- | :------------: | ------------ | ----------------------------------------------------------- | +| 1 | consumer_id | BIGINT | Consumer ID | +| 2 | consumer_group | BINARY(192) | Consumer group | +| 3 | client_id | BINARY(192) | Client ID (user-defined) | +| 4 | status | BINARY(20) | Consumer status | +| 5 | topics | BINARY(204) | Subscribed topic. Returns one row for each topic. 
| +| 6 | up_time | TIMESTAMP | Time of first connection to TDengine Server | +| 7 | subscribe_time | TIMESTAMP | Time of first subscription | +| 8 | rebalance_time | TIMESTAMP | Time of first rebalance triggering | + +## PERF_SUBSCRIPTIONS + +| # | **Column** | **Data Type** | **Description** | +| --- | :------------: | ------------ | ------------------------ | +| 1 | topic_name | BINARY(204) | Subscribed topic | +| 2 | consumer_group | BINARY(193) | Subscribed consumer group | +| 3 | vgroup_id | INT | Vgroup ID for the consumer | +| 4 | consumer_id | BIGINT | Consumer ID | + +## PERF_TRANS + +| # | **Column** | **Data Type** | **Description** | +| --- | :--------------: | ------------ | -------------------------------------------------------------- | +| 1 | id | INT | ID of the transaction currently running | +| 2 | create_time | TIMESTAMP | Creation time | +| 3 | stage | BINARY(12) | Transaction stage (redoAction, undoAction, or commit) | +| 4 | db1 | BINARY(64) | First database having a conflict with the transaction | +| 5 | db2 | BINARY(64) | Second database having a conflict with the transaction | +| 6 | failed_times | INT | Times the transaction has failed | +| 7 | last_exec_time | TIMESTAMP | Previous time the transaction was run | +| 8 | last_action_info | BINARY(511) | Reason for failure on previous run | + +## PERF_SMAS + +| # | **Column** | **Data Type** | **Description** | +| --- | :---------: | ------------ | ------------------------------------------- | +| 1 | sma_name | BINARY(192) | Time-range-wise SMA name | +| 2 | create_time | TIMESTAMP | Creation time | +| 3 | stable_name | BINARY(192) | Supertable name | +| 4 | vgroup_id | INT | Dedicated vgroup name | + +## PERF_STREAMS + +| # | **Column** | **Data Type** | **Description** | +| --- | :----------: | ------------ | --------------------------------------- | +| 1 | stream_name | BINARY(64) | Stream name | +| 2 | create_time | TIMESTAMP | Creation time | +| 3 | sql | BINARY(1024) | SQL statement 
used to create the stream | +| 4 | status | BINARY(20) | Current status | +| 5 | source_db | BINARY(64) | Source database | +| 6 | target_db | BINARY(64) | Target database | +| 7 | target_table | BINARY(192) | Target table | +| 8 | watermark | BIGINT | Watermark (see stream processing documentation) | +| 9 | trigger | INT | Method of triggering the result push (see stream processing documentation) | diff --git a/docs/en/12-taos-sql/24-show.md b/docs/en/12-taos-sql/24-show.md index 781f94324c..96503c9598 100644 --- a/docs/en/12-taos-sql/24-show.md +++ b/docs/en/12-taos-sql/24-show.md @@ -1,9 +1,9 @@ --- -sidebar_label: SHOW 命令 -title: 使用 SHOW 命令查看系统元数据 +sidebar_label: SHOW Statement +title: SHOW Statement for Metadata --- -除了使用 `select` 语句查询 `INFORMATION_SCHEMA` 数据库中的表获得系统中的各种元数据、系统信息和状态之外,也可以用 `SHOW` 命令来实现同样的目的。 +In addition to running SELECT statements on INFORMATION_SCHEMA, you can also use SHOW to obtain system metadata, information, and status. ## SHOW ACCOUNTS @@ -11,9 +11,9 @@ title: 使用 SHOW 命令查看系统元数据 SHOW ACCOUNTS; ``` -显示当前系统中所有租户的信息。 +Shows information about tenants on the system. -注:企业版独有 +Note: TDengine Enterprise Edition only. ## SHOW APPS @@ -21,7 +21,7 @@ SHOW ACCOUNTS; SHOW APPS; ``` -显示接入集群的应用(客户端)信息。 +Shows all clients (such as applications) that connect to the cluster. ## SHOW BNODES @@ -29,7 +29,7 @@ SHOW APPS; SHOW BNODES; ``` -显示当前系统中存在的 BNODE (backup node, 即备份节点)的信息。 +Shows information about backup nodes (bnodes) in the system. ## SHOW CLUSTER @@ -37,7 +37,7 @@ SHOW BNODES; SHOW CLUSTER; ``` -显示当前集群的信息 +Shows information about the current cluster. ## SHOW CONNECTIONS @@ -45,7 +45,7 @@ SHOW CLUSTER; SHOW CONNECTIONS; ``` -显示当前系统中存在的连接的信息。 +Shows information about connections to the system. ## SHOW CONSUMERS @@ -53,7 +53,7 @@ SHOW CONNECTIONS; SHOW CONSUMERS; ``` -显示当前数据库下所有活跃的消费者的信息。 +Shows information about all active consumers in the system. 
## SHOW CREATE DATABASE @@ -61,7 +61,7 @@ SHOW CONSUMERS; SHOW CREATE DATABASE db_name; ``` -显示 db_name 指定的数据库的创建语句。 +Shows the SQL statement used to create the specified database. ## SHOW CREATE STABLE @@ -69,7 +69,7 @@ SHOW CREATE DATABASE db_name; SHOW CREATE STABLE [db_name.]stb_name; ``` -显示 tb_name 指定的超级表的创建语句 +Shows the SQL statement used to create the specified supertable. ## SHOW CREATE TABLE @@ -77,7 +77,7 @@ SHOW CREATE STABLE [db_name.]stb_name; SHOW CREATE TABLE [db_name.]tb_name ``` -显示 tb_name 指定的表的创建语句。支持普通表、超级表和子表。 +Shows the SQL statement used to create the specified table. This statement can be used on supertables, standard tables, and subtables. ## SHOW DATABASES @@ -85,7 +85,7 @@ SHOW CREATE TABLE [db_name.]tb_name SHOW DATABASES; ``` -显示用户定义的所有数据库。 +Shows all user-created databases. ## SHOW DNODES @@ -93,7 +93,7 @@ SHOW DATABASES; SHOW DNODES; ``` -显示当前系统中 DNODE 的信息。 +Shows all dnodes in the system. ## SHOW FUNCTIONS @@ -101,7 +101,7 @@ SHOW DNODES; SHOW FUNCTIONS; ``` -显示用户定义的自定义函数。 +Shows all user-defined functions in the system. ## SHOW LICENSE @@ -110,9 +110,9 @@ SHOW LICENSE; SHOW GRANTS; ``` -显示企业版许可授权的信息。 +Shows information about the TDengine Enterprise Edition license. -注:企业版独有 +Note: TDengine Enterprise Edition only. ## SHOW INDEXES @@ -120,7 +120,7 @@ SHOW GRANTS; SHOW INDEXES FROM tbl_name [FROM db_name]; ``` -显示已创建的索引。 +Shows indices that have been created. ## SHOW LOCAL VARIABLES @@ -128,7 +128,7 @@ SHOW INDEXES FROM tbl_name [FROM db_name]; SHOW LOCAL VARIABLES; ``` -显示当前客户端配置参数的运行值。 +Shows the working configuration of the client. ## SHOW MNODES @@ -136,7 +136,7 @@ SHOW LOCAL VARIABLES; SHOW MNODES; ``` -显示当前系统中 MNODE 的信息。 +Shows information about mnodes in the system. ## SHOW MODULES @@ -144,7 +144,7 @@ SHOW MNODES; SHOW MODULES; ``` -显示当前系统中所安装的组件的信息。 +Shows information about modules installed in the system. 
## SHOW QNODES @@ -152,7 +152,7 @@ SHOW MODULES; SHOW QNODES; ``` -显示当前系统中 QNODE (查询节点)的信息。 +Shows information about qnodes in the system. ## SHOW SCORES @@ -160,9 +160,9 @@ SHOW QNODES; SHOW SCORES; ``` -显示系统被许可授权的容量的信息。 +Shows information about the storage space allowed by the license. -注:企业版独有 +Note: TDengine Enterprise Edition only. ## SHOW SNODES @@ -170,7 +170,7 @@ SHOW SCORES; SHOW SNODES; ``` -显示当前系统中 SNODE (流计算节点)的信息。 +Shows information about stream processing nodes (snodes) in the system. ## SHOW STABLES @@ -178,7 +178,7 @@ SHOW SNODES; SHOW [db_name.]STABLES [LIKE 'pattern']; ``` -显示当前数据库下的所有超级表的信息。可以使用 LIKE 对表名进行模糊匹配。 +Shows all supertables in the current database. You can use LIKE for fuzzy matching. ## SHOW STREAMS @@ -186,7 +186,7 @@ SHOW [db_name.]STABLES [LIKE 'pattern']; SHOW STREAMS; ``` -显示当前系统内所有流计算的信息。 +Shows information about streams in the system. ## SHOW SUBSCRIPTIONS @@ -194,7 +194,7 @@ SHOW STREAMS; SHOW SUBSCRIPTIONS; ``` -显示当前数据库下的所有的订阅关系 +Shows all subscriptions in the current database. ## SHOW TABLES @@ -202,7 +202,7 @@ SHOW SUBSCRIPTIONS; SHOW [db_name.]TABLES [LIKE 'pattern']; ``` -显示当前数据库下的所有普通表和子表的信息。可以使用 LIKE 对表名进行模糊匹配。 +Shows all standard tables and subtables in the current database. You can use LIKE for fuzzy matching. ## SHOW TABLE DISTRIBUTED @@ -210,7 +210,7 @@ SHOW [db_name.]TABLES [LIKE 'pattern']; SHOW TABLE DISTRIBUTED table_name; ``` -显示表的数据分布信息。 +Shows how table data is distributed. ## SHOW TAGS @@ -218,7 +218,7 @@ SHOW TABLE DISTRIBUTED table_name; SHOW TAGS FROM child_table_name [FROM db_name]; ``` -显示子表的标签信息。 +Shows all tag information in a subtable. ## SHOW TOPICS @@ -226,7 +226,7 @@ SHOW TAGS FROM child_table_name [FROM db_name]; SHOW TOPICS; ``` -显示当前数据库下的所有主题的信息。 +Shows all topics in the current database. ## SHOW TRANSACTIONS @@ -234,7 +234,7 @@ SHOW TOPICS; SHOW TRANSACTIONS; ``` -显示当前系统中正在执行的事务的信息 +Shows all running transactions in the system. 
 ## SHOW USERS @@ -242,7 +242,7 @@ SHOW TRANSACTIONS; SHOW USERS; ``` -显示当前系统中所有用户的信息。包括用户自定义的用户和系统默认用户。 +Shows information about users on the system. This includes user-created users and system-defined users. ## SHOW VARIABLES @@ -251,7 +251,7 @@ SHOW VARIABLES; SHOW DNODE dnode_id VARIABLES; ``` -显示当前系统中各节点需要相同的配置参数的运行值,也可以指定 DNODE 来查看其的配置参数。 +Shows the working configuration of the parameters that must be the same on each node. You can also specify a dnode to show the working configuration for that node. ## SHOW VGROUPS @@ -259,7 +259,7 @@ SHOW DNODE dnode_id VARIABLES; SHOW [db_name.]VGROUPS; ``` -显示当前系统中所有 VGROUP 或某个 db 的 VGROUPS 的信息。 +Shows information about all vgroups in the system or about the vgroups for a specified database. ## SHOW VNODES @@ -267,4 +267,4 @@ SHOW [db_name.]VGROUPS; SHOW VNODES [dnode_name]; ``` -显示当前系统中所有 VNODE 或某个 DNODE 的 VNODE 的信息。 +Shows information about all vnodes in the system or about the vnodes for a specified dnode. diff --git a/docs/en/12-taos-sql/25-grant.md b/docs/en/12-taos-sql/25-grant.md index 0c290350cc..37438ee780 100644 --- a/docs/en/12-taos-sql/25-grant.md +++ b/docs/en/12-taos-sql/25-grant.md @@ -1,29 +1,29 @@ --- -sidebar_label: 权限管理 -title: 权限管理 +sidebar_label: Permissions Management +title: Permissions Management --- -本节讲述如何在 TDengine 中进行权限管理的相关操作。 +This document describes how to manage permissions in TDengine. -## 创建用户 +## Create a User ```sql -CREATE USER use_name PASS password; +CREATE USER user_name PASS 'password'; ``` -创建用户。 +This statement creates a user account. -use_name最长为23字节。 +The maximum length of user_name is 23 bytes. -password最长为128字节,合法字符包括"a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/",不可以出现单双引号、撇号、反斜杠和空格,且不可以为空。 +The maximum length of password is 128 bytes. The password can include letters, digits, and special characters excluding single quotation marks, double quotation marks, backticks, backslashes, and spaces. The password cannot be empty. 
-## 删除用户 +## Delete a User ```sql DROP USER user_name; ``` -## 修改用户信息 +## Modify User Information ```sql ALTER USER user_name alter_user_clause @@ -35,12 +35,12 @@ alter_user_clause: { } ``` -- PASS:修改用户密码。 -- ENABLE:修改用户是否启用。1表示启用此用户,0表示禁用此用户。 -- SYSINFO:修改用户是否可查看系统信息。1表示可以查看系统信息,0表示不可以查看系统信息。 +- PASS: Modify the user password. +- ENABLE: Specify whether the user is enabled or disabled. 1 indicates enabled and 0 indicates disabled. +- SYSINFO: Specify whether the user can query system information. 1 indicates that the user can query system information and 0 indicates that the user cannot query system information. -## 授权 +## Grant Permissions ```sql GRANT privileges ON priv_level TO user_name @@ -61,15 +61,15 @@ priv_level : { } ``` -对用户授权。 +Grant permissions to a user. -授权级别支持到DATABASE,权限有READ和WRITE两种。 +Permissions are granted on the database level. You can grant read or write permissions. -TDengine 有超级用户和普通用户两类用户。超级用户缺省创建为root,拥有所有权限。使用超级用户创建出来的用户为普通用户。在未授权的情况下,普通用户可以创建DATABASE,并拥有自己创建的DATABASE的所有权限,包括删除数据库、修改数据库、查询时序数据和写入时序数据。超级用户可以给普通用户授予其他DATABASE的读写权限,使其可以在此DATABASE上读写数据,但不能对其进行删除和修改数据库的操作。 +TDengine has superusers and standard users. The default superuser name is root. This account has all permissions. You can use the superuser account to create standard users. With no permissions, standard users can create databases and have permissions on the databases that they create. These include deleting, modifying, querying, and writing to their own databases. Superusers can grant users permission to read and write other databases. However, standard users cannot delete or modify databases created by other users. -对于非DATABASE的对象,如USER、DNODE、UDF、QNODE等,普通用户只有读权限(一般为SHOW命令),不能创建和修改。 +For non-database objects such as users, dnodes, and user-defined functions, standard users have read permissions only, generally by means of the SHOW statement. Standard users cannot create or modify these objects. 
-## 撤销授权 +## Revoke Permissions ```sql REVOKE privileges ON priv_level FROM user_name @@ -91,4 +91,4 @@ priv_level : { ``` -收回对用户的授权。 \ No newline at end of file +Revoke permissions from a user. diff --git a/docs/en/12-taos-sql/26-udf.md b/docs/en/12-taos-sql/26-udf.md index bd8d61a584..e6199e8b31 100644 --- a/docs/en/12-taos-sql/26-udf.md +++ b/docs/en/12-taos-sql/26-udf.md @@ -1,28 +1,68 @@ --- -sidebar_label: 自定义函数 -title: 用户自定义函数 +sidebar_label: User-Defined Functions +title: User-Defined Functions (UDF) --- -除了 TDengine 的内置函数以外,用户还可以编写自己的函数逻辑并加入TDengine系统中。 +You can create user-defined functions and import them into TDengine. +## Create UDF -## 创建函数 +SQL command can be executed on the host where the generated UDF DLL resides to load the UDF DLL into TDengine. This operation cannot be done through REST interface or web console. Once created, any client of the current TDengine can use these UDF functions in their SQL commands. UDF are stored in the management node of TDengine. The UDFs loaded in TDengine would be still available after TDengine is restarted. +When creating UDF, the type of UDF, i.e. a scalar function or aggregate function must be specified. If the specified type is wrong, the SQL statements using the function would fail with errors. The input data type and output data type must be consistent with the UDF definition. + +- Create Scalar Function ```sql -CREATE [AGGREGATE] FUNCTION func_name AS library_path OUTPUTTYPE type_name [BUFSIZE value] +CREATE FUNCTION function_name AS library_path OUTPUTTYPE output_type; ``` -语法说明: + - function_name: The scalar function name to be used in SQL statement which must be consistent with the UDF name and is also the name of the compiled DLL (.so file). + - library_path: The absolute path of the DLL file including the name of the shared object file (.so). The path must be quoted with single or double quotes. + - output_type: The data type of the results of the UDF. 
+ + For example, the following SQL statement can be used to create a UDF from `libbitand.so`. + + ```sql + CREATE FUNCTION bit_and AS "/home/taos/udf_example/libbitand.so" OUTPUTTYPE INT; + ``` + +- Create Aggregate Function +```sql +CREATE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type [ BUFSIZE buffer_size ]; +``` + + - function_name: The aggregate function name to be used in SQL statement which must be consistent with the udfNormalFunc name and is also the name of the compiled DLL (.so file). + - library_path: The absolute path of the DLL file including the name of the shared object file (.so). The path must be quoted with single or double quotes. + - output_type: The output data type, the value is the literal string of the supported TDengine data type. + - buffer_size: The size of the intermediate buffer in bytes. This parameter is optional. + + For example, the following SQL statement can be used to create a UDF from `libl2norm.so`. + + ```sql + CREATE AGGREGATE FUNCTION l2norm AS "/home/taos/udf_example/libl2norm.so" OUTPUTTYPE DOUBLE bufsize 8; + ``` +For more information about user-defined functions, see [User-Defined Functions](../../develop/udf). -AGGREGATE:标识此函数是标量函数还是聚集函数。 -func_name:函数名,必须与函数实现中udfNormalFunc的实际名称一致。 -library_path:包含UDF函数实现的动态链接库的绝对路径,是在客户端侧主机上的绝对路径。 -OUTPUTTYPE:标识此函数的返回类型。 -BUFSIZE:中间结果的缓冲区大小,单位是字节。不设置则默认为0。最大不可超过512字节。 +## Manage UDF -关于如何开发自定义函数,请参考 [UDF使用说明](../../develop/udf)。 +- The following statement deletes the specified user-defined function. +``` +DROP FUNCTION function_name; +``` -## 删除自定义函数 +- function_name: The value of function_name in the CREATE statement used to import the UDF for example `bit_and` or `l2norm`. +```sql +DROP FUNCTION bit_and; +``` +- Show Available UDF +```sql +SHOW FUNCTIONS; +``` + +## Call UDF +The function name specified when creating UDF can be used directly in SQL statements, just like builtin functions. 
For example: ```sql -DROP FUNCTION func_name -``` \ No newline at end of file +SELECT X(c1,c2) FROM table/stable; +``` + +The above SQL statement invokes function X for column c1 and c2. You can use query keywords like WHERE with user-defined functions. diff --git a/docs/en/12-taos-sql/27-index.md b/docs/en/12-taos-sql/27-index.md index 2c0907723e..7d09bc43ab 100644 --- a/docs/en/12-taos-sql/27-index.md +++ b/docs/en/12-taos-sql/27-index.md @@ -1,11 +1,11 @@ --- -sidebar_label: 索引 -title: 使用索引 +sidebar_label: Index +title: Using Indices --- -TDengine 从 3.0.0.0 版本开始引入了索引功能,支持 SMA 索引和 FULLTEXT 索引。 +TDengine supports SMA and FULLTEXT indexing. -## 创建索引 +## Create an Index ```sql CREATE FULLTEXT INDEX index_name ON tb_name (col_name [, col_name] ...) @@ -19,29 +19,29 @@ functions: function [, function] ... ``` -### SMA 索引 +### SMA Indexing -对指定列按 INTERVAL 子句定义的时间窗口创建进行预聚合计算,预聚合计算类型由 functions_string 指定。SMA 索引能提升指定时间段的聚合查询的性能。目前,限制一个超级表只能创建一个 SMA INDEX。 +Performs pre-aggregation on the specified column over the time window defined by the INTERVAL clause. The type is specified in functions_string. SMA indexing improves aggregate query performance for the specified time period. One supertable can only contain one SMA index. -- 支持的函数包括 MAX、MIN 和 SUM。 -- WATERMARK: 最小单位毫秒,取值范围 [0ms, 900000ms],默认值为 5 秒,只可用于超级表。 -- MAX_DELAY: 最小单位毫秒,取值范围 [1ms, 900000ms],默认值为 interval 的值(但不能超过最大值),只可用于超级表。注:不建议 MAX_DELAY 设置太小,否则会过于频繁的推送结果,影响存储和查询性能,如无特殊需求,取默认值即可。 +- The max, min, and sum functions are supported. +- WATERMARK: Enter a value between 0ms and 900000ms. The most precise unit supported is milliseconds. The default value is 5 seconds. This option can be used only on supertables. +- MAX_DELAY: Enter a value between 1ms and 900000ms. The most precise unit supported is milliseconds. The default value is the value of interval provided that it does not exceed 900000ms. This option can be used only on supertables. Note: Retain the default value if possible. 
Configuring a small MAX_DELAY may cause results to be frequently pushed, affecting storage and query performance. -### FULLTEXT 索引 +### FULLTEXT Indexing -对指定列建立文本索引,可以提升含有文本过滤的查询的性能。FULLTEXT 索引不支持 index_option 语法。现阶段只支持对 JSON 类型的标签列创建 FULLTEXT 索引。不支持多列联合索引,但可以为每个列分布创建 FULLTEXT 索引。 +Creates a text index for the specified column. FULLTEXT indexing improves performance for queries with text filtering. The index_option syntax is not supported for FULLTEXT indexing. FULLTEXT indexing is supported for JSON tag columns only. Multiple columns cannot be indexed together. However, separate indices can be created for each column. -## 删除索引 +## Delete an Index ```sql DROP INDEX index_name; ``` -## 查看索引 +## View Indices ````sql ```sql SHOW INDEXES FROM tbl_name [FROM db_name]; ```` -显示在所指定的数据库或表上已创建的索引。 +Shows indices that have been created for the specified database or table. diff --git a/docs/en/12-taos-sql/28-recovery.md b/docs/en/12-taos-sql/28-recovery.md index 72b220b8ff..14ac14f867 100644 --- a/docs/en/12-taos-sql/28-recovery.md +++ b/docs/en/12-taos-sql/28-recovery.md @@ -1,38 +1,38 @@ --- -sidebar_label: 异常恢复 -title: 异常恢复 +sidebar_label: Error Recovery +title: Error Recovery --- -在一个复杂的应用场景中,连接和查询任务等有可能进入一种错误状态或者耗时过长迟迟无法结束,此时需要有能够终止这些连接或任务的方法。 +In a complex environment, connections and query tasks may encounter errors or fail to return in a reasonable time. If this occurs, you can terminate the connection or task. -## 终止连接 +## Terminate a Connection ```sql KILL CONNECTION conn_id; ``` -conn_id 可以通过 `SHOW CONNECTIONS` 获取。 +You can use the SHOW CONNECTIONS statement to find the conn_id. -## 终止查询 +## Terminate a Query ```sql SHOW QUERY query_id; ``` -query_id 可以通过 `SHOW QUERIES` 获取。 +You can use the SHOW QUERIES statement to find the query_id. -## 终止事务 +## Terminate a Transaction ```sql KILL TRANSACTION trans_id ``` -trans_id 可以通过 `SHOW TRANSACTIONS` 获取。 +You can use the SHOW TRANSACTIONS statement to find the trans_id. 
-## 重置客户端缓存 +## Reset Client Cache ```sql RESET QUERY CACHE; ``` -如果在多客户端情况下出现元数据不同步的情况,可以用这条命令强制清空客户端缓存,随后客户端会从服务端拉取最新的元数据。 +If metadata becomes desynchronized among multiple clients, you can use this command to clear the client-side cache. Clients then obtain the latest metadata from the server. diff --git a/docs/en/12-taos-sql/29-changes.md b/docs/en/12-taos-sql/29-changes.md new file mode 100644 index 0000000000..8532eeac5d --- /dev/null +++ b/docs/en/12-taos-sql/29-changes.md @@ -0,0 +1,95 @@ +--- +sidebar_label: Changes in TDengine 3.0 +title: Changes in TDengine 3.0 +description: "This document explains how TDengine SQL has changed in version 3.0." +--- + +## Basic SQL Elements + +| # | **Element** | **