diff --git a/docs/en/02-intro.md b/docs/en/02-intro.md index f0e1bc10b250ebbcc4057c26e46d2960df5bf7fe..accc393029a812c0028c1ed076b7cf02d5602782 100644 --- a/docs/en/02-intro.md +++ b/docs/en/02-intro.md @@ -23,8 +23,8 @@ The major features are listed below: 3. Data Explorer: browse through databases and even run SQL queries once you log in. 4. Visualization: - Supports [Grafana](../visual/grafana/) - - Supports Google data studio - - Supports Grafana cloud (to be released soon) + - Supports Google Data Studio + - Supports Grafana Cloud (to be released soon) 5. [Data Subscription](../data-subscription/): Applications can subscribe to a table or a set of tables. The API is the same as Kafka's, but you can specify filter conditions, and you can share the topic with other users and user groups in TDengine Cloud. 6. [Stream Processing](../stream/): Not only is continuous query supported, but TDengine also supports event-driven stream processing, so Flink or Spark is not needed for time-series data processing. 7. Enterprise diff --git a/docs/en/07-data-in/20-telegraf.md b/docs/en/07-data-in/20-telegraf.md index 51cf08bedc838b3c76c958d454c5c50c18c0b9b7..d29685d0829b4e76ae37d9c251511c011f229f3e 100644 --- a/docs/en/07-data-in/20-telegraf.md +++ b/docs/en/07-data-in/20-telegraf.md @@ -55,7 +55,7 @@ Edit section "outputs.http". {{#include docs/examples/thirdparty/telegraf-conf.toml:null:nrc}} ``` -The resulting configuration will collect CPU and memory data and sends it to TDengine database named "telegraf". Database "telegraf" will be created automatically if it dose not exist in advance. +The resulting configuration collects CPU and memory data and sends it to the TDengine database named "telegraf". The database "telegraf" must be created first through the TDengine Cloud explorer. ## Start Telegraf diff --git a/docs/en/09-data-out/04-taosdump.md b/docs/en/09-data-out/04-taosdump.md index 3a4198e53bf0e67b33ab8ed81dc834c3eb2e7548..481337e5ec30489aac5859dde0dc9cf602a4b277 100644 --- a/docs/en/09-data-out/04-taosdump.md +++ b/docs/en/09-data-out/04-taosdump.md @@ -4,7 +4,7 @@ title: Dump Data Using taosDump description: Dump data from TDengine into files using taosDump --- -# taosDump +## Overview taosdump is a tool that supports backing up data from a running TDengine cluster and restoring the backed-up data to the same or another running TDengine cluster.
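As a quick illustration, a backup-and-restore round trip typically looks like the sketch below; the database name and dump directory are placeholders, so check `taosdump --help` for the exact flags supported by your version.

```bash
# back up the database "power" into a local directory
taosdump -D power -o ./power-dump

# restore the backed-up data from that directory into the target cluster
taosdump -i ./power-dump
```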
diff --git a/docs/en/09-data-out/_sub_c.mdx b/docs/en/09-data-out/_sub_c.mdx deleted file mode 100644 index b0667268e9978533e84e68ea3fe5f285538df762..0000000000000000000000000000000000000000 --- a/docs/en/09-data-out/_sub_c.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```c -{{#include docs/examples/c/tmq_example.c}} -``` diff --git a/docs/en/09-data-out/_sub_cs.mdx b/docs/en/09-data-out/_sub_cs.mdx deleted file mode 100644 index a09e91422b12d3bfea1794d73aec28335dea9056..0000000000000000000000000000000000000000 --- a/docs/en/09-data-out/_sub_cs.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```csharp -{{#include docs/examples/csharp/native-example/SubscribeDemo.cs}} -``` \ No newline at end of file diff --git a/docs/en/09-data-out/_sub_go.mdx b/docs/en/09-data-out/_sub_go.mdx deleted file mode 100644 index 34b2aefd92c5eef75b59fbbba96b83da091722a7..0000000000000000000000000000000000000000 --- a/docs/en/09-data-out/_sub_go.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```go -{{#include docs/examples/go/sub/main.go}} -``` \ No newline at end of file diff --git a/docs/en/09-data-out/_sub_java.mdx b/docs/en/09-data-out/_sub_java.mdx deleted file mode 100644 index d14b5fd6095dd90f89dd2c2e828858585cfddff9..0000000000000000000000000000000000000000 --- a/docs/en/09-data-out/_sub_java.mdx +++ /dev/null @@ -1,11 +0,0 @@ -```java -{{#include docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java}} -{{#include docs/examples/java/src/main/java/com/taos/example/MetersDeserializer.java}} -{{#include docs/examples/java/src/main/java/com/taos/example/Meters.java}} -``` -```java -{{#include docs/examples/java/src/main/java/com/taos/example/MetersDeserializer.java}} -``` -```java -{{#include docs/examples/java/src/main/java/com/taos/example/Meters.java}} -``` \ No newline at end of file diff --git a/docs/en/09-data-out/_sub_node.mdx b/docs/en/09-data-out/_sub_node.mdx deleted file mode 100644 index 3eeff0922a31a478dd34a77c6cb6471f51a57a8c..0000000000000000000000000000000000000000 --- a/docs/en/09-data-out/_sub_node.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```js -{{#include docs/examples/node/nativeexample/subscribe_demo.js}} -``` \ No newline at end of file diff --git a/docs/en/09-data-out/_sub_python.mdx b/docs/en/09-data-out/_sub_python.mdx deleted file mode 100644 index 1309da5b416799492a6b85aae4b775e227c0ad6e..0000000000000000000000000000000000000000 --- a/docs/en/09-data-out/_sub_python.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```py -{{#include docs/examples/python/tmq_example.py}} -``` diff --git a/docs/en/09-data-out/_sub_rust.mdx b/docs/en/09-data-out/_sub_rust.mdx deleted file mode 100644 index eb06c8f18c3e0f2e908a2d8d9fad9b0e73b866a2..0000000000000000000000000000000000000000 --- a/docs/en/09-data-out/_sub_rust.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```rust -{{#include docs/examples/rust/cloud-example/examples/subscribe_demo.rs}} -``` diff --git a/docs/en/10-programming/01-connect/01-python.md b/docs/en/10-programming/01-connect/01-python.md index f35641d16f14e3c43b63d03988dbee0c1424e9e3..04b34c324135ffc9bdd6f75a84bcdc57f0466a4b 100644 --- a/docs/en/10-programming/01-connect/01-python.md +++ b/docs/en/10-programming/01-connect/01-python.md @@ -88,7 +88,7 @@ For more details about how to write or query data via REST API, please check [RE ## Jupyter -**Step 1: Install** +### Step 1: Install For the users who are familiar with Jupyter to program in Python, both TDengine Python connector and Jupyter need to be ready in your environment. If you have not done yet, please use the commands below to install them. 
@@ -113,9 +113,9 @@ conda install -c conda-forge taospy -**Step 2: Configure** +### Step 2: Configure -In order for Jupyter to connect to TDengine cloud service, before launching Jupypter, the environment setting must be performed. We use Linux bash as example. +For Jupyter to connect to the TDengine cloud service, the environment must be configured before launching Jupyter. We use Linux bash as an example. ```bash export TDENGINE_CLOUD_TOKEN="" export TDENGINE_CLOUD_URL="" jupyter lab ``` -**Step 3: Connect** +### Step 3: Connect Once jupyter lab is launched, the Jupyter lab service is automatically connected and shown in your browser. You can create a new notebook, copy the sample code below, and run it. diff --git a/docs/en/10-programming/01-connect/02-java.md b/docs/en/10-programming/01-connect/02-java.md index f9c79f18013142f978b3bd93bdd335bc150aeaff..0f55200d08f886c4e4c1cfbd2ae0f862ca32e753 100644 --- a/docs/en/10-programming/01-connect/02-java.md +++ b/docs/en/10-programming/01-connect/02-java.md @@ -71,7 +71,7 @@ To obtain the value of JDBC URL, please log in [TDengine Cloud](https://cloud.td ## Connect -Code bellow get JDBC URL from environment variables first and then create a `Connection` object, witch is a standard JDBC Connection object. +The code below first gets the JDBC URL from environment variables and then creates a `Connection` object, which is a standard JDBC Connection object. ```java {{#include docs/examples/java/src/main/java/com/taos/example/ConnectCloudExample.java:connect}} ``` diff --git a/docs/en/10-programming/01-connect/04-rust.md b/docs/en/10-programming/01-connect/04-rust.md index d0e0d013efe4e7c07434f08f75b20fe056b67e4d..7c6f4032b5bbc6e8a558f3e9a137defd40b6ec3b 100644 --- a/docs/en/10-programming/01-connect/04-rust.md +++ b/docs/en/10-programming/01-connect/04-rust.md @@ -31,7 +31,7 @@ anyhow = "1.0.0" ## Config -Run this command in your terminal to save TDengine cloud token as variables: +Run this command in your terminal to save the TDengine cloud DSN as an environment variable: diff --git a/docs/en/10-programming/01-connect/06-csharp.md b/docs/en/10-programming/01-connect/06-csharp.md index 0fe7e3baa8b0d432c6cbfe4050a803420253de96..46e5699f6795c96d9f1825bbc0430f98b52312b6 100644 --- a/docs/en/10-programming/01-connect/06-csharp.md +++ b/docs/en/10-programming/01-connect/06-csharp.md @@ -33,7 +33,9 @@ Add following ItemGroup and Task to your project file. +``` +```bash dotnet add package TDengine.Connector ``` diff --git a/docs/en/10-programming/02-model.md b/docs/en/10-programming/02-model.md index 14f4a0b01e79d39166cd4fcbd21d59e996fa00a1..6223ae84daeb1f800d4d69a230e94e20f87538ca 100644 --- a/docs/en/10-programming/02-model.md +++ b/docs/en/10-programming/02-model.md @@ -3,7 +3,9 @@ title: Data Model description: Typical Data Model used in TDengine --- -The data model employed by TDengine is similar to that of a relational database. You have to create databases and tables. You must design the data model based on your own business and application requirements. You should design the STable (an abbreviation for super table) schema to fit your data. This chapter will explain the big picture without getting into syntactical details. +The data model employed by TDengine is similar to that of a relational database. You have to create databases and tables. You must design the data model based on your own business and application requirements. You should design the [STable](/concept/#super-table-stable) (an abbreviation for super table) schema to fit your data.
This chapter will explain the big picture without getting into syntactical details. + +Note: before you read this chapter, please make sure you have already read through [Key Concepts](/concept/), since TDengine introduces new concepts like "one table for one [data collection point](/concept/#data-collection-point)" and "[super table](/concept/#super-table-stable)". ## Create Database diff --git a/docs/en/10-programming/03-insert.md b/docs/en/10-programming/03-insert.md index b31d38a44b22ccdb019f021254d94350c6c02a3e..86f57102b1093820e87a05cde9129cd9045b799c 100644 --- a/docs/en/10-programming/03-insert.md +++ b/docs/en/10-programming/03-insert.md @@ -33,7 +33,7 @@ INSERT INTO test.d101 VALUES (1538548684000, 10.2, 220, 0.23) (1538548696650, 10 Data can be inserted into multiple tables in the same SQL statement. The example below inserts 2 rows into table "d101" and 1 row into table "d102". ```sql -INSERT INTO test.d101 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, 218, 0.33) d102 VALUES (1538548696800, 12.3, 221, 0.31); +INSERT INTO test.d101 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, 218, 0.33) test.d102 VALUES (1538548696800, 12.3, 221, 0.31); ``` For more details about `INSERT` please refer to [INSERT](https://docs.tdengine.com/cloud/taos-sql/insert). diff --git a/docs/en/10-programming/04-query.md b/docs/en/10-programming/04-query.md index c7ab775ca8cf4607f15a66f7e112fd74e28ef1c2..4b23ac692978354d1d846d0cbc02b79186ed07d9 100644 --- a/docs/en/10-programming/04-query.md +++ b/docs/en/10-programming/04-query.md @@ -42,7 +42,7 @@ For detailed query syntax please refer to [Select](https://docs.tdengine.com/clo In most use cases, there are always multiple kinds of data collection points. A new concept, called STable (abbreviation for super table), is used in TDengine to represent one type of data collection point, and a subtable is used to represent a specific data collection point of that type. Tags are used by TDengine to represent the static properties of data collection points. A specific data collection point has its own values for static properties. By specifying filter conditions on tags, aggregation can be performed efficiently among all the subtables created via the same STable, i.e. same type of data collection points. Aggregate functions applicable for tables can be used directly on STables; the syntax is exactly the same. -In summary, records across subtables can be aggregated by a simple query on their STable. It is like a join operation. However, tables belonging to different STables can not be aggregated. +In summary, records across subtables can be aggregated by a simple query on their STable. It is like a join operation. However, tables belonging to different STables cannot be aggregated. ### Example 1 @@ -106,6 +106,7 @@ Down sampling can also be used for STable.
For example, the SQL statement below ```sql title="SQL" SELECT _wstart, SUM(current) FROM test.meters where location like "California%" INTERVAL(1s) limit 5; ``` + ```txt title="output" _wstart | sum(current) | ====================================================== diff --git a/docs/en/10-programming/06-connector/01-python.md b/docs/en/10-programming/06-connector/01-python.md index 7875f1ca1c6e3f5a4b6a13a5db233d7aae315dbb..ab6da4b263ba539c1f1bbc54c7cd9c4e2f573376 100644 --- a/docs/en/10-programming/06-connector/01-python.md +++ b/docs/en/10-programming/06-connector/01-python.md @@ -18,12 +18,12 @@ The source code for the Python connector is hosted on [GitHub](https://github.co ### Install via pip ``` -pip3 install -U taospy +pip3 install -U taospy[ws] ``` ### Install via conda ``` -conda install -c conda-forge taospy +conda install -c conda-forge taospy taospyws ``` ### Installation verification @@ -75,16 +75,45 @@ The `RestClient` class is a direct wrapper for the [REST API](/reference/rest-ap For a more detailed description of the `sql()` method, please refer to [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html). -## Important Update +## Other notes + +### Exception handling + +All errors from database operations are thrown directly as exceptions, and the error message from the database is passed up the exception stack. The application is responsible for exception handling. For example: + +```python +import taos + +try: + conn = taos.connect() + conn.execute("CREATE TABLE 123") # wrong sql +except taos.Error as e: + print(e) + print("exception class: ", e.__class__.__name__) + print("error number:", e.errno) + print("error message:", e.msg) +except BaseException as other: + print("exception occurred") + print(other) + +# output: +# [0x0216]: syntax error near 'Incomplete SQL statement' +# exception class: ProgrammingError +# error number: -2147483114 +# error message: syntax error near 'Incomplete SQL statement' + +``` + +[view source code](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/python/handle_exception.py) + +### About nanoseconds -| Connector version | Important Update | Release date | -| ----------------- | ----------------------------------------- | ------------ | -| 2.6.2 | fix ci script | 2022-08-18 | -| 2.5.2 | fix taos-ws-py python version dependency | 2022-08-12 | -| 2.5.1 | (rest): add timezone option | 2022-08-11 | -| 2.5.0 | add taosws module | 2022-08-10 | -| 2.4.0 | add execute method to TaosRestConnection | 2022-07-18 | -| 2.3.3 | support connect to TDengine Cloud Service | 2022-06-06 | +Because Python's nanosecond support is currently incomplete (see the links below), the current implementation returns integers at nanosecond precision instead of the `datetime` type produced for `ms` and `us`, and application developers need to handle this on their own; using pandas' `to_datetime()` is recommended. The Python connector may modify the interface in the future once Python fully supports nanoseconds. + +1. https://stackoverflow.com/questions/10611328/parsing-datetime-strings-containing-nanoseconds +2. 
https://www.python.org/dev/peps/pep-0564/ + +## Important Update [**Release Notes**](https://github.com/taosdata/taos-connector-python/releases) diff --git a/docs/en/10-programming/06-connector/02-java.md b/docs/en/10-programming/06-connector/02-java.md index d16b21621b40c68069609a4c5aa4b672d212a420..9cea45d3cc9da5ad8ce828804c9cb7d3b7901a7b 100644 --- a/docs/en/10-programming/06-connector/02-java.md +++ b/docs/en/10-programming/06-connector/02-java.md @@ -16,19 +16,19 @@ import TabItem from '@theme/TabItem'; TDengine currently supports timestamp, number, character, Boolean type, and the corresponding type conversion with Java is as follows: -| TDengine DataType | JDBCType (driver version < 2.0.24) | JDBCType (driver version > = 2.0.24) | -| ----------------- | ---------------------------------- | ------------------------------------ | -| TIMESTAMP | java.lang.Long | java.sql.Timestamp | -| INT | java.lang.Integer | java.lang.Integer | -| BIGINT | java.lang.Long | java.lang.Long | -| FLOAT | java.lang.Float | java.lang.Float | -| DOUBLE | java.lang.Double | java.lang.Double | -| SMALLINT | java.lang.Short | java.lang.Short | -| TINYINT | java.lang.Byte | java.lang.Byte | -| BOOL | java.lang.Boolean | java.lang.Boolean | -| BINARY | java.lang.String | byte array | -| NCHAR | java.lang.String | java.lang.String | -| JSON | - | java.lang.String | +| TDengine DataType | JDBCType | +| ----------------- | ---------------------------------- | +| TIMESTAMP | java.sql.Timestamp | +| INT | java.lang.Integer | +| BIGINT | java.lang.Long | +| FLOAT | java.lang.Float | +| DOUBLE | java.lang.Double | +| SMALLINT | java.lang.Short | +| TINYINT | java.lang.Byte | +| BOOL | java.lang.Boolean | +| BINARY | byte array | +| NCHAR | java.lang.String | +| JSON | java.lang.String | **Note**: Only TAG supports JSON types @@ -53,7 +53,7 @@ Add following dependency in the `pom.xml` file of your Maven project: com.taosdata.jdbc taos-jdbcdriver - 2.0.** + 3.0.0 ``` @@ -68,7 +68,7 @@ cd taos-connector-jdbc mvn clean install -Dmaven.test.skip=true ``` -After compilation, a jar package named taos-jdbcdriver-2.0.XX-dist.jar is generated in the target directory, and the compiled jar file is automatically placed in the local Maven repository. +After compilation, a jar package named taos-jdbcdriver-3.0.*-dist.jar is generated in the target directory, and the compiled jar file is automatically placed in the local Maven repository. @@ -76,8 +76,7 @@ After compilation, a jar package named taos-jdbcdriver-2.0.XX-dist.jar is genera ## Establish Connection using URL TDengine's JDBC URL specification format is: -`jdbc:[TAOS-RS]://[host_name]:[port]/[database_name]?batchfetch={true|false}&useSSL={true|false}&token={token}&httpPoolSize={httpPoolSize}&httpKeepAlive={true|false}]&httpConnectTimeout={httpTimeout}&httpSocketTimeout={socketTimeout}` - +`jdbc:TAOS-RS://[host_name]:[port]/[database_name]?batchfetch={true|false}&useSSL={true|false}&token={token}&httpPoolSize={httpPoolSize}&httpKeepAlive={true|false}]&httpConnectTimeout={httpTimeout}&httpSocketTimeout={socketTimeout}` ```java Class.forName("com.taosdata.jdbc.rs.RestfulDriver"); @@ -85,7 +84,7 @@ String jdbcUrl = System.getenv("TDENGINE_JDBC_URL"); Connection conn = DriverManager.getConnection(jdbcUrl); ``` -Note: +:::note - REST API is stateless. When using the JDBC REST connection, you need to specify the database name of the table and super table in SQL. For example. 
@@ -97,7 +96,7 @@ Note: ```sql insert into test using weather(ts, temperature) tags('California.SanFrancisco') values(now, 24.6); ``` - +::: ### Establish Connection using URL and Properties @@ -120,7 +119,6 @@ If the configuration parameters are duplicated in the URL, Properties, the `prio 1. JDBC URL parameters, as described above, can be specified in the parameters of the JDBC URL. 2. Properties connProps - ## Usage Examples ### Create Database and Tables @@ -141,8 +139,8 @@ int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now System.out.println("insert " + affectedRows + " rows."); ``` -`now`` is an internal function. The default is the current time of the client's computer. - +> `now` is an internal function. The default is the current time of the client's computer. +> `now + 1s` represents the current time of the client plus 1 second, followed by the number representing the unit of time: a (milliseconds), s (seconds), m (minutes), h (hours), d (days), w (weeks), n (months), y (years). ### Querying data @@ -188,6 +186,9 @@ There are three types of error codes that the JDBC connector can report: - Error code of the native connection method (error code between 0x2351 and 0x2400) - Error code of other TDengine function modules +For specific error codes, please refer to. + +- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java) ### Closing resources @@ -197,10 +198,10 @@ stmt.close(); conn.close(); ``` -:::note +:::note Be sure to close the connection, otherwise, there will be a connection leak. - ::: + ### Use with Connection Pool #### HikariCP @@ -283,12 +284,51 @@ Please refer to: [JDBC example](https://github.com/taosdata/TDengine/tree/develo ## Recent update logs -| taos-jdbcdriver version | major changes | -| :---------------------: | :------------------------------------------: | -| 2.0.38 | JDBC REST connections add bulk pull function | -| 2.0.37 | Added support for json tags | -| 2.0.36 | Add support for schemaless writing | +| taos-jdbcdriver version | major changes | +| :---------------------: | :--------------------------------------------: | +| 3.0.3 | fix timestamp resolution error for REST connection in jdk17+ version | +| 3.0.1 - 3.0.2 | fix the resultSet data is parsed incorrectly sometimes. 3.0.1 is compiled on JDK 11, you are advised to use 3.0.2 in the JDK 8 environment | +| 3.0.0 | Support for TDengine 3.0 | +| 2.0.42 | fix wasNull interface return value in WebSocket connection | +| 2.0.41 | fix decode method of username and password in REST connection | +| 2.0.39 - 2.0.40 | Add REST connection/request timeout parameters | +| 2.0.38 | JDBC REST connections add bulk pull function | +| 2.0.37 | Support json tags | +| 2.0.36 | Support schemaless writing | + +## Frequently Asked Questions + +1. Why is there no performance improvement when using Statement's `addBatch()` and `executeBatch()` to perform `batch data writing/update`? + + **Cause**: In TDengine's JDBC implementation, SQL statements submitted by `addBatch()` method are executed sequentially in the order they are added, which does not reduce the number of interactions with the server and does not bring performance improvement. + + **Solution**: 1. splice multiple values in a single insert statement; 2. use multi-threaded concurrent insertion; 3. use parameter-bound writing + +2. 
java.lang.UnsatisfiedLinkError: no taos in java.library.path + + **Cause**: The program did not find the dependent native library `taos`. + + **Solution**: On Windows, copy `C:\TDengine\driver\taos.dll` to the `C:\Windows\System32` directory; on Linux, creating the following soft link will work: `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so`; on macOS, the soft link is `/usr/local/lib/libtaos.dylib`. + +3. java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform + + **Cause**: Currently, TDengine only supports 64-bit JDK. + + **Solution**: Reinstall the 64-bit JDK. + +4. java.lang.NoSuchMethodError: setByteArray + + **Cause**: taos-jdbcdriver 3.* only supports TDengine 3.0 and later. + + **Solution**: Use taos-jdbcdriver 2.* with your TDengine 2.* deployment. + +5. java.lang.NoSuchMethodError: java.nio.ByteBuffer.position(I)Ljava/nio/ByteBuffer; ... taos-jdbcdriver-3.0.1.jar + +**Cause**: taos-jdbcdriver 3.0.1 is compiled on JDK 11. + +**Solution**: Use taos-jdbcdriver 3.0.2. +For additional troubleshooting, see [FAQ](../../../train-faq/faq). ## API Reference diff --git a/docs/en/10-programming/06-connector/03-go.md b/docs/en/10-programming/06-connector/03-go.md index 9de2b3141237a8dd1f3867312d3914f25f449875..e59423b422048f13f6776df3d821a4b18e6b506f 100644 --- a/docs/en/10-programming/06-connector/03-go.md +++ b/docs/en/10-programming/06-connector/03-go.md @@ -10,26 +10,40 @@ This article describes how to install `driver-go` and connect to TDengine cluste The source code of `driver-go` is hosted on [GitHub](https://github.com/taosdata/driver-go). -## Installation steps +## Version support + +Please refer to [version support list](/reference/connector#version-support) + +## Installation Steps + +### Pre-installation preparation + +* Install Go development environment (Go 1.14 and above, GCC 4.8.5 and above) +* If you use the native connector, please install the TDengine client driver. Please refer to [Install Client Driver](/reference/connector/#install-client-driver) for specific steps + +Configure the environment variables and verify them with the following commands. + +* ```go env``` +* ```gcc -v``` + ### Use go get to install -``` -go get -u github.com/taosdata/driver-go/v2@develop -``` +`go get -u github.com/taosdata/driver-go/v3@latest` + ### Manage with go mod 1. Initialize the project with the `go mod` command. ```text go mod init taos-demo - ``` + ``` 2. Introduce taosSql ```go import ( "database/sql" - _ "github.com/taosdata/driver-go/v2/taosSql" + _ "github.com/taosdata/driver-go/v3/taosSql" ) ``` @@ -37,7 +51,7 @@ go get -u github.com/taosdata/driver-go/v2@develop ```text go mod tidy - ``` + ``` 4. Run the program with `go run taos-demo` or compile the binary with the `go build` command. @@ -46,7 +60,7 @@ go get -u github.com/taosdata/driver-go/v2@develop go build ``` -## Create a connection +## Establishing a connection ### Data source name (DSN) @@ -73,7 +87,10 @@ Use `taosRestful` as `driverName` and use a correct [DSN](#DSN) as `dataSourceNa ## Sample programs -* [sample program](https://github.com/taosdata/TDengine/tree/develop/examples/go) +### More sample programs + +* [sample program](https://github.com/taosdata/driver-go/tree/3.0/examples) + * [Video tutorial](https://www.taosdata.com/blog/2020/11/11/1951.html).
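To make the REST connection concrete, here is a minimal, self-contained sketch; the environment variable name and the query are illustrative only, and the DSN format is described in the section above.

```go
package main

import (
	"database/sql"
	"fmt"
	"os"

	_ "github.com/taosdata/driver-go/v3/taosRestful"
)

func main() {
	// the DSN is assumed to be stored in an environment variable
	dsn := os.Getenv("TDENGINE_CLOUD_DSN")
	db, err := sql.Open("taosRestful", dsn)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	var version string
	// SERVER_VERSION() is a built-in TDengine SQL function
	if err := db.QueryRow("SELECT SERVER_VERSION()").Scan(&version); err != nil {
		panic(err)
	}
	fmt.Println("server version:", version)
}
```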
## Usage limitations @@ -92,7 +109,7 @@ import ( "fmt" "time" - _ "github.com/taosdata/driver-go/v2/taosRestful" + _ "github.com/taosdata/driver-go/v3/taosRestful" ) func main() { @@ -187,7 +204,6 @@ This API is created successfully without checking permissions, but only when you `sql.Open` Built-in method to execute query statements. - ## API Reference -Full API see [driver-go documentation](https://pkg.go.dev/github.com/taosdata/driver-go/v2) \ No newline at end of file +For the full API, see the [driver-go documentation](https://pkg.go.dev/github.com/taosdata/driver-go/v3) \ No newline at end of file diff --git a/docs/en/10-programming/06-connector/04-rust.md b/docs/en/10-programming/06-connector/04-rust.md index 99765329f488b7650e119dd07e1bd8199b673a4d..2e52afb6b0789a386f3b42890a7768c7da1ffbbe 100644 --- a/docs/en/10-programming/06-connector/04-rust.md +++ b/docs/en/10-programming/06-connector/04-rust.md @@ -3,13 +3,19 @@ toc_max_heading_level: 4 sidebar_position: 5 sidebar_label: Rust title: TDengine Rust Connector -description: Detailed guide for Rust Connector +description: This document describes the TDengine Rust connector. --- +[![Crates.io](https://img.shields.io/crates/v/taos)](https://crates.io/crates/taos) ![Crates.io](https://img.shields.io/crates/d/taos) [![docs.rs](https://img.shields.io/docsrs/taos)](https://docs.rs/taos) +`taos` is the official Rust connector for TDengine. Rust developers can develop applications to access the TDengine instance data. -`libtaos` is the official Rust language connector for TDengine. Rust developers can develop applications to access the TDengine instance data. +The source code for the Rust connectors is located on [GitHub](https://github.com/taosdata/taos-connector-rust). -The source code for `libtaos` is hosted on [GitHub](https://github.com/taosdata/libtaos-rs). +## Version support + +Please refer to [version support list](/reference/connector#version-support) + +The Rust Connector is still under rapid development and is not guaranteed to be backward compatible before 1.0. We recommend using TDengine version 3.0 or higher to avoid known issues. ## Installation @@ -17,74 +23,76 @@ The source code for `libtaos` is hosted on [GitHub](https://github.com/taosdata/ Install the Rust development toolchain. -### Adding libtaos dependencies - -```toml -[dependencies] -# use rest feature -libtaos = { version = "*", features = ["rest"]} -``` +### Adding taos dependencies -### Using connection pools +Depending on the connection method, add the [taos][taos] dependency in your Rust project as follows: -Please enable the `r2d2` feature in `Cargo.toml`. +In `Cargo.toml`, add [taos][taos]: ```toml [dependencies] -libtaos = { version = "*", features = ["rest", "r2d2"] } +# use default feature +taos = "*" ``` -## Create a connection +## Establishing a connection -Create `TaosCfg` from TDengine cloud DSN. The DSN should be in form of `://[:port]?token=`. +[TaosBuilder] creates a connection constructor through the DSN connection description string. +The DSN should be in the form of `://[:port]?token=`. ```rust -use libtaos::*; -let cfg = TaosCfg::from_dsn(DSN)?; +let builder = TaosBuilder::from_dsn(DSN)?; ``` You can now use this object to create the connection. ```rust -let conn = cfg.connect()? ; +let conn = builder.build()?; ``` More than one connection object can be created. ```rust -let conn = cfg.connect()? ; -let conn2 = cfg.connect()? ; -``` - -You can use connection pools in applications.
-```rust -let pool = r2d2::Pool::builder() - .max_size(10000) // max connections - .build(cfg)? ; - -// ... -// Use pool to get connection -let conn = pool.get()? ; +let conn1 = builder.build()?; +let conn2 = builder.build()?; ``` After that, you can perform the following operations on the database. ```rust -async fn demo() -> Result<(), Error> { - // get connection ... - - // create database - conn.exec("create database if not exists demo").await? - // create table - conn.exec("create table if not exists demo.tb1 (ts timestamp, v int)").await? - // insert - conn.exec("insert into demo.tb1 values(now, 1)").await? - // query - let rows = conn.query("select * from demo.tb1").await? - for row in rows.rows { - println!("{}", row.into_iter().join(",")); +async fn demo(taos: &Taos, db: &str) -> Result<(), Error> { + // prepare database + taos.exec_many([ + format!("DROP DATABASE IF EXISTS `{db}`"), + format!("CREATE DATABASE `{db}`"), + format!("USE `{db}`"), + ]) + .await?; + + let inserted = taos.exec_many([ + // create super table + "CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) \ + TAGS (`groupid` INT, `location` BINARY(24))", + // create child table + "CREATE TABLE `d0` USING `meters` TAGS(0, 'California.LosAngles')", + // insert into child table + "INSERT INTO `d0` values(now - 10s, 10, 116, 0.32)", + // insert with NULL values + "INSERT INTO `d0` values(now - 8s, NULL, NULL, NULL)", + // insert and automatically create table with tags if not exists + "INSERT INTO `d1` USING `meters` TAGS(1, 'California.SanFrancisco') values(now - 9s, 10.1, 119, 0.33)", + // insert many records in a single sql + "INSERT INTO `d1` values (now-8s, 10, 120, 0.33) (now - 6s, 10, 119, 0.34) (now - 4s, 11.2, 118, 0.322)", + ]).await?; + + assert_eq!(inserted, 6); + let mut result = taos.query("select * from `meters`").await?; + + for field in result.fields() { + println!("got field: {}", field.name()); + } + + // rows in `result` can now be fetched, e.g. via `result.rows()` (see below) + Ok(()) +} ``` @@ -92,24 +100,26 @@ async fn demo() -> Result<(), Error> { ### Connection pooling -In complex applications, we recommend enabling connection pools. Connection pool for [libtaos] is implemented using [r2d2]. +In complex applications, we recommend enabling connection pools. [taos] implements connection pools based on [r2d2]. As follows, a connection pool with default parameters can be generated. ```rust -let pool = r2d2::Pool::new(cfg)? ; +let pool = TaosBuilder::from_dsn(dsn)?.pool()?; ``` You can set the same connection pool parameters using the connection pool's constructor. ```rust - use std::time::Duration; - let pool = r2d2::Pool::builder() - .max_size(5000) // max connections - .max_lifetime(Some(Duration::from_minutes(100))) // lifetime of each connection - .min_idle(Some(1000)) // minimal idle connections - .connection_timeout(Duration::from_minutes(2)) - .build(cfg); +use std::time::Duration; // needed for the timeouts below + +let dsn = std::env::var("TDENGINE_CLOUD_DSN")?; + +let opts = PoolBuilder::new() + .max_size(5000) // max connections + .max_lifetime(Some(Duration::from_secs(60 * 60))) // lifetime of each connection + .min_idle(Some(1000)) // minimal idle connections + .connection_timeout(Duration::from_secs(2)); + +let pool = TaosBuilder::from_dsn(dsn)?.with_pool_builder(opts)?; ``` In the application code, use `pool.get()?` to get a connection object [Taos]. ```rust let taos = pool.get()? 
; ``` +# Connectors -The [Taos] structure is the connection manager in [libtaos] and provides two main APIs. +The [Taos][struct.Taos] object provides an API to perform operations on multiple databases. 1. `exec`: Execute some non-query SQL statements, such as `CREATE`, `ALTER`, `INSERT`, etc. ```rust - taos.exec().await? + let affected_rows = taos.exec("INSERT INTO tb1 VALUES(now, NULL)").await?; + ``` + +2. `exec_many`: Run multiple SQL statements simultaneously or in order. + + ```rust + taos.exec_many([ + "CREATE DATABASE test", + "USE test", + "CREATE TABLE `tb1` (`ts` TIMESTAMP, `val` INT)", + ]).await?; ``` -2. `query`: Execute the query statement and return the [TaosQueryData] object. +3. `query`: Run a query statement and return a [ResultSet] object. ```rust - let q = taos.query("select * from log.logs").await? + let mut q = taos.query("select * from log.logs").await?; ``` - The [TaosQueryData] object stores the query result data and basic information about the returned columns (column name, type, length). + The [ResultSet] object stores query result data and the names, types, and lengths of returned columns - Column information is stored using [ColumnMeta]. + You can obtain column information by using [.fields()]. ```rust - let cols = &q.column_meta; + let cols = q.fields(); for col in cols { - println!("name: {}, type: {:?} , bytes: {}", col.name, col.type_, col.bytes); + println!("name: {}, type: {:?} , bytes: {}", col.name(), col.ty(), col.bytes()); } ``` It fetches data line by line. ```rust - for (i, row) in q.rows.iter().enumerate() { - for (j, cell) in row.iter().enumerate() { - println!("cell({}, {}) data: {}", i, j, cell); + let mut rows = result.rows(); + let mut nrows = 0; + while let Some(row) = rows.try_next().await? { + for (col, (name, value)) in row.enumerate() { + println!( + "[{}] got value in col {} (named `{:>8}`): {}", + nrows, col, name, value + ); } + nrows += 1; } ``` + Or use the [serde](https://serde.rs) deserialization framework. + + ```rust + #[derive(Debug, Deserialize)] + struct Record { + // deserialize timestamp to chrono::DateTime + ts: DateTime, + // float to f32 + current: Option, + // int to i32 + voltage: Option, + phase: Option, + groupid: i32, + // binary/varchar to String + location: String, + } + + let records: Vec = taos + .query("select * from `meters`") + .await? + .deserialize() + .try_collect() + .await?; + ``` + Note that Rust asynchronous functions and an asynchronous runtime are required. -[Taos] provides a few Rust methods that encapsulate SQL to reduce the frequency of `format!` code blocks. +[Taos][struct.Taos] provides Rust methods for some SQL statements to reduce the number of `format!`s. - `.describe(table: &str)`: Executes `DESCRIBE` and returns a Rust data structure. - `.create_database(database: &str)`: Executes the `CREATE DATABASE` statement. +- `.use_database(database: &str)`: Executes the `USE` statement. +In addition, this structure is also the entry point for [Parameter Binding](#Parameter Binding Interface) and [Line Protocol Interface](#Line Protocol Interface). Please refer to the specific API descriptions for usage. -Please move to the Rust documentation hosting page for other related structure API usage instructions: . +For information about other structure APIs, see the [Rust documentation](https://docs.rs/taos). 
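As a brief illustration of the helper methods listed above, the sketch below assumes an async context, an already-built `Taos` connection named `taos`, and a pre-existing table `meters`; it is not a complete program.

```rust
// create and switch to a database, then inspect a table's schema
taos.create_database("demo").await?;
taos.use_database("demo").await?;
let desc = taos.describe("meters").await?; // runs DESCRIBE and returns a Rust data structure
println!("{:?}", desc);
```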
-[libtaos]: https://github.com/taosdata/libtaos-rs -[tdengine]: https://github.com/taosdata/TDengine [r2d2]: https://crates.io/crates/r2d2 -[TaosCfg]: https://docs.rs/libtaos/latest/libtaos/struct.TaosCfg.html -[Taos]: https://docs.rs/libtaos/latest/libtaos/struct.Taos.html -[TaosQueryData]: https://docs.rs/libtaos/latest/libtaos/field/struct.TaosQueryData.html -[Field]: https://docs.rs/libtaos/latest/libtaos/field/enum.Field.html +[taos]: https://github.com/taosdata/taos-connector-rust [r2d2]: https://crates.io/crates/r2d2 +[TaosBuilder]: https://docs.rs/taos/latest/taos/struct.TaosBuilder.html +[TaosCfg]: https://docs.rs/taos/latest/taos/struct.TaosCfg.html +[struct.Taos]: https://docs.rs/taos/latest/taos/struct.Taos.html +[Stmt]: https://docs.rs/taos/latest/taos/struct.Stmt.html diff --git a/docs/en/10-programming/06-connector/05-node.md b/docs/en/10-programming/06-connector/05-node.md index 096d65c255eef632424002cb04abbc02e90fda04..9d3a1de1e8e21a847d4681309eb4071a5c0ce666 100644 --- a/docs/en/10-programming/06-connector/05-node.md +++ b/docs/en/10-programming/06-connector/05-node.md @@ -4,9 +4,13 @@ title: TDengine Node.JS Connector description: Detailed guide for Node.JS Connector --- - `td2.0-rest-connector` are the official Node.js language connectors for TDengine. Node.js developers can develop applications to access TDengine instance data. `td2.0-rest-connector` is a **REST connector** that connects to TDengine instances via the REST API. +`@tdengine/rest` is the official Node.js connector for TDengine. Node.js developers can develop applications to access TDengine instance data. `@tdengine/rest` is a **REST connector** that connects to TDengine instances via the REST API. -The Node.js connector source code is hosted on [GitHub](https://github.com/taosdata/taos-connector-node). +The source code for the Node.js connectors is located on [GitHub](https://github.com/taosdata/taos-connector-node/tree/3.0). + +## Version support + +Please refer to [version support list](/reference/connector#version-support) ## Installation steps ### Pre-installation Install the Node.js development environment ### Install via npm ```bash -npm i td2.0-rest-connector +npm install @tdengine/rest ``` ## Establishing a connection @@ -30,13 +34,31 @@ npm i td2.0-rest-connector {{#include docs/examples/node/reference_example.js:usage}} ``` -## Important Updates +## Frequently Asked Questions + +1. Using REST connections requires starting taosAdapter. + + ```bash + sudo systemctl start taosadapter + ``` + +2. Node.js versions + + `@tdengine/client` supports Node.js v10.9.0 to 10.20.0 and 12.8.0 to 12.9.1. + +3. "Unable to establish connection", "Unable to resolve FQDN" + Usually, the root cause is an incorrect FQDN configuration. You can refer to this section in the [FAQ](https://docs.tdengine.com/2.4/train-faq/faq/#2-how-to-handle-unable-to-establish-connection) to troubleshoot. -| td2.0-rest-connector version | Description | -| ------------------------- | ---------------------------------------------------------------- | -| 1.0.5 | Support connect to TDengine cloud service +## Important update records +| package name | version | TDengine version | Description | +|----------------------|---------|---------------------|---------------------------------------------------------------------------| +| @tdengine/rest | 3.0.0 | 3.0.0 | Supports TDengine 3.0. Not compatible with TDengine 2.x. 
| +| td2.0-rest-connector | 1.0.7 | 2.4.x;2.5.x;2.6.x | Removed default port 6041. | +| td2.0-rest-connector | 1.0.6 | 2.4.x;2.5.x;2.6.x | Fixed affectRows bug with create, insert, update, and alter. | +| td2.0-rest-connector | 1.0.5 | 2.4.x;2.5.x;2.6.x | Support cloud token | +| td2.0-rest-connector | 1.0.3 | 2.4.x;2.5.x;2.6.x | Supports connection management, standard queries, system information, error information, and continuous queries | ## API Reference -[API Reference](https://docs.taosdata.com/api/td2.0-connector/) \ No newline at end of file +[API Reference](https://docs.taosdata.com/api/td2.0-connector/) diff --git a/docs/en/10-programming/06-connector/06-csharp.md b/docs/en/10-programming/06-connector/06-csharp.md index 1d205f690fa0552e76ea74c689bd1e91af0f4163..2a9be8ef31c9c7f5f7ee3ea8d66e24a545684441 100644 --- a/docs/en/10-programming/06-connector/06-csharp.md +++ b/docs/en/10-programming/06-connector/06-csharp.md @@ -6,15 +6,23 @@ description: Detailed guide for C# Connector `TDengine.Connector` is the official C# connector for TDengine. C# developers can develop applications to access TDengine instance data. +This article describes how to install `TDengine.Connector` in a Linux or Windows environment and connect to TDengine clusters via `TDengine.Connector` to perform basic operations such as data writing and querying. + The source code for `TDengine.Connector` is hosted on [GitHub](https://github.com/taosdata/taos-connector-dotnet/tree/3.0). +## Version support + +Please refer to [version support list](/reference/connector#version-support) + ## Installation ### Pre-installation -Install the .NET deployment SDK. +* Install the [.NET SDK](https://dotnet.microsoft.com/download) +* [Nuget Client](https://docs.microsoft.com/en-us/nuget/install-nuget-client-tools) (optional installation) +* Install the TDengine client driver; please refer to [Install client driver](/reference/connector/#install-client-driver) for details -### Add TDengine.Connector through Nuget +### Add `TDengine.Connector` through Nuget ```bash dotnet add package TDengine.Connector ``` @@ -26,7 +34,7 @@ dotnet add package TDengine.Connector {{#include docs/examples/csharp/cloud-example/connect/connect.csproj}} ``` -``` C# +``` csharp {{#include docs/examples/csharp/cloud-example/connect/Program.cs}} ``` @@ -55,9 +63,34 @@ dotnet add package TDengine.Connector ## Important Updates | TDengine.Connector | Description | -| ------------------------- | ---------------------------------------------------------------- | -| 3.0.2 | Support .NET Framework 4.5 and above. Support .Net standard 2.0. Nuget package includes dynamic library for WebSocket.| -| 3.0.1 | Support connect to TDengine cloud service| +|--------------------|--------------------------------| +| 3.0.2 | Support .NET Framework 4.5 and above. Support .Net standard 2.0. Nuget package includes dynamic library for WebSocket.| +| 3.0.1 | Support WebSocket and Cloud, with query, insert, and parameter binding functions| +| 3.0.0 | Supports TDengine 3.0.0.0. TDengine 2.x is not supported. Added `TDengine.Impl.GetData()` interface to deserialize query results. | +| 1.0.7 | Fixed TDengine.Query() memory leak. | +| 1.0.6 | Fix schemaless bug in 1.0.4 and 1.0.5. | +| 1.0.5 | Fix Windows sync query Chinese error bug. | +| 1.0.4 | Add asynchronous query, subscription, and other functions. Fix the binding parameter bug. | +| 1.0.3 | Add parameter binding, schemaless, JSON tag, etc. 
| +| 1.0.2 | Add connection management, synchronous query, error messages, etc. | + +## Other descriptions + +### Third-party driver + +`Taos` is an ADO.NET connector for TDengine, supporting Linux and Windows platforms. It is contributed by community contributor Maikebing (@maikebing). Please refer to: + +* Interface download: + +## Frequently Asked Questions + +1. "Unable to establish connection", "Unable to resolve FQDN" + + Usually, it's caused by an incorrect FQDN configuration. Please refer to this section in the [FAQ](https://docs.tdengine.com/2.4/train-faq/faq/#2-how-to-handle-unable-to-establish-connection) to troubleshoot. + +2. Unhandled exception. System.DllNotFoundException: Unable to load DLL 'taos' or one of its dependencies: The specified module cannot be found. + + This is usually because the program did not find the dependent client driver. To solve it, copy `C:\TDengine\driver\taos.dll` to the `C:\Windows\System32\` directory on Windows; on Linux, creating the following soft link will work: `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so`. ## API Reference diff --git a/docs/en/10-programming/06-connector/09-rest-api.md b/docs/en/10-programming/06-connector/09-rest-api.md index b51d25843c099b5472fb2e2eb05f9148a64d2e0d..db466d15882b8c16fbb0dd7da1dafb9e7ca682d8 100644 --- a/docs/en/10-programming/06-connector/09-rest-api.md +++ b/docs/en/10-programming/06-connector/09-rest-api.md @@ -4,7 +4,7 @@ title: REST API description: Detailed guide for REST API --- -To support the development of various types of applications and platforms, TDengine provides an API that conforms to REST principles; namely REST API. To minimize the learning cost, unlike REST APIs for other database engines, TDengine allows insertion of SQL commands in the BODY of an HTTP POST request, to operate the database. +To support the development of various types of applications and platforms, TDengine provides an API that conforms to REST principles; namely REST API. To minimize the learning cost, unlike REST APIs for other database engines, TDengine allows insertion of SQL commands in the BODY of an HTTP POST request, to operate the database. :::note One difference from the native connector is that the REST interface is stateless and so the `USE db_name` command has no effect. All references to table names and super table names need to specify the database name in the prefix. TDengine supports specification of the db_name in RESTful URL. If the database name prefix is not specified in the SQL command, the `db_name` specified in the URL will be used. @@ -305,4 +305,3 @@ Description: "rows": 1 } ``` - diff --git a/docs/en/11-visual/01-grafana.md b/docs/en/11-visual/01-grafana.md index a059047ae840eb9b72486a2e7647585839c4efdf..e1f4870f0d08bc96dc62ac0b4ecd34af1a15d97a 100644 --- a/docs/en/11-visual/01-grafana.md +++ b/docs/en/11-visual/01-grafana.md @@ -15,7 +15,7 @@ TDengine currently supports Grafana versions 7.5 and above. Users can go to the ### Install with GUI -The TDengine data source plugin is already published as a signed Grafana plugin. You can easily install it from Grafana Configuration GUI. In any platform you already installed Grafana, you can open the URL http://localhost:3000 then click plugin menu from left panel. +The TDengine data source plugin is already published as a signed Grafana plugin. You can easily install it from the Grafana configuration GUI. 
On any platform where Grafana is already installed, you can open the URL `http://localhost:3000` and then click the plugin menu in the left panel. ![click plugin menu](./grafana/click-plugin-menu-from-config.webp) diff --git a/docs/en/11-visual/02-gds.md b/docs/en/11-visual/02-gds.md index 13714341602834c584c50eae9dd25e9ae3b81487..32069eadd823f21a3ab58d34561156a6796368ec 100644 --- a/docs/en/11-visual/02-gds.md +++ b/docs/en/11-visual/02-gds.md @@ -3,7 +3,7 @@ sidebar_label: Google Data Studio title: Use Google Data Studio --- -Using its [partner connector](https://datastudio.google.com/data?search=TDengine), Google Data Studio can quickly access TDengine and create interactive reports and dashboards using its web-based reporting features.The whole process does not require any code development. Share your reports and dashboards with individuals, teams, or the world. Collaborate in real time. Embed your report on any web page. +Using its [partner connector](https://datastudio.google.com/data?search=TDengine), Google Data Studio can quickly access TDengine and create interactive reports and dashboards using its web-based reporting features. The whole process does not require any code development. Share your reports and dashboards with individuals, teams, or the world. Collaborate in real time. And embed your report on any web page. Refer to [GitHub](https://github.com/taosdata/gds-connector/blob/master/README.md) for additional information on utilizing the Data Studio with TDengine. @@ -19,23 +19,12 @@ The current [connector](https://datastudio.google.com/data?search=TDengine) supp #### URL -TDengine Cloud URL. - - - - - To obtain the URL, please login [TDengine Cloud](https://cloud.tdengine.com) and click "Visualize" and then select "Google Data Studio". #### TDengine Cloud Token - - - - - To obtain the value of cloud token, please login [TDengine Cloud](https://cloud.tdengine.com) and click "Visualize" and then select "Google Data Studio". diff --git a/docs/en/12-data-subscription/index.md b/docs/en/12-data-subscription/index.md index 80b01810d8f2234607ef1986dda27794ab8612ff..c8c4eca84f3c5754b7cfecd187c3ad56fbcad31c 100644 --- a/docs/en/12-data-subscription/index.md +++ b/docs/en/12-data-subscription/index.md @@ -1,7 +1,7 @@ --- sidebar_label: Data Subscription title: Data Subscription -description: Using topics to do data subscription and share to others from TDengine. +description: Using topics to do data subscription and share to others from TDengine Cloud. --- import Tabs from "@theme/Tabs"; import TabItem from "@theme/TabItem"; diff --git a/docs/en/12-stream.md b/docs/en/12-stream.md index 5bda5c0ce9dcfa0c54809c12467b436c2d2fa534..32d22beff440e7028cceceedd515ce5ed690fda6 100644 --- a/docs/en/12-stream.md +++ b/docs/en/12-stream.md @@ -33,7 +33,7 @@ It is common that smart electrical meter systems for businesses generate million ### Create a Database for Raw Data -Create database `power` using explore in cloud console. +Create the database `power` using the explorer in the TDengine Cloud console. Then create four subtables as follows: diff --git a/docs/en/13-replication/index.md b/docs/en/13-replication/index.md index bd50feacdaf60b57d3502113285e3bc9948582f7..8c8ad3a2a82bdf2bfd3455c2ee462dd912b40df8 100644 --- a/docs/en/13-replication/index.md +++ b/docs/en/13-replication/index.md @@ -6,4 +6,4 @@ description: Replicate data between TDengine cloud services TDengine provides full support for data replication. 
You can replicate data from TDengine Cloud to a private TDengine instance, from a private TDengine instance to TDengine Cloud, or from one cloud platform to another, regardless of which cloud or region the two services reside in. -TDengine also provides database backup for enterprise plan. +TDengine also provides database backup for the enterprise plan. diff --git a/docs/en/19-tools/01-cli.md b/docs/en/19-tools/01-cli.md index d807fca80f2be74b9bb30f0d6023089a51ae4d81..8e8cebf5d9e7524ebf4a6d05220f53e5b3467a68 100644 --- a/docs/en/19-tools/01-cli.md +++ b/docs/en/19-tools/01-cli.md @@ -7,7 +7,6 @@ description: Instructions and tips for using the TDengine CLI to connect TDengin import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; - The TDengine command-line interface (hereafter referred to as `TDengine CLI`) is the simplest way for users to manipulate and interact with TDengine instances. @@ -61,22 +60,20 @@ To obtain the value of cloud DSN, please log in [TDengine Cloud](https://cloud.t ::: - - -## Connect +## Connect -To access the TDengine Cloud, you can execute `taos` if you already set the environment variable. +To access the TDengine Cloud instance, you can execute `taos` if you already set the environment variable. -``` +```bash taos ``` If you did not set the environment variable for a TDengine Cloud instance, or you want to access a TDengine Cloud instance other than the one the variable points to, you can use `taos -E ` as below. -``` +```bash taos -E $TDENGINE_CLOUD_DSN ``` @@ -85,13 +82,13 @@ taos.exe To access the TDengine Cloud, you can execute `taos` if you already set the environment variable. -``` -taos +```powershell +taos.exe ``` If you did not set the environment variable for a TDengine Cloud instance, or you want to access a TDengine Cloud instance other than the one the variable points to, you can use `taos -E ` as below. -``` +```powershell taos.exe -E $TDENGINE_CLOUD_DSN ``` @@ -100,13 +97,13 @@ taos.exe -E $TDENGINE_CLOUD_DSN To access the TDengine Cloud, you can execute `taos` if you already set the environment variable. -``` +```bash taos ``` If you did not set the environment variable for a TDengine Cloud instance, or you want to access a TDengine Cloud instance other than the one the variable points to, you can use `taos -E ` as below. -``` +```bash taos -E $TDENGINE_CLOUD_DSN ``` @@ -117,7 +114,7 @@ taos -E $TDENGINE_CLOUD_DSN TDengine CLI will display a welcome message and version information if it successfully connects to the TDengine service. If it fails, TDengine CLI will print an error message. The TDengine CLI prompts as follows: -``` +```text Welcome to the TDengine shell from Linux, Client Version:3.0.0.0 Copyright (c) 2022 by TAOS Data, Inc. All rights reserved. taos> ``` After entering the TDengine CLI, you can execute various SQL commands, including inserts, queries, or administrative commands. Please see the [official document](https://docs.tdengine.com/reference/taos-shell#execute-sql-script-file) for more details.
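For instance, a short session might look like the sketch below; the database and table names are illustrative, and creating a database assumes your account has that privilege in TDengine Cloud:

```sql
CREATE DATABASE IF NOT EXISTS demo;
CREATE TABLE demo.sensor (ts TIMESTAMP, temperature FLOAT);
INSERT INTO demo.sensor VALUES (NOW, 23.5);
SELECT * FROM demo.sensor;
```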
- diff --git a/docs/en/19-tools/03-taosbenchmark.md b/docs/en/19-tools/03-taosbenchmark.md index d7317f36c75bb47d2b35a96d7c8850560489057b..0d4c22144924d63c20e08fe08fd4c66d6c3a62f4 100644 --- a/docs/en/19-tools/03-taosbenchmark.md +++ b/docs/en/19-tools/03-taosbenchmark.md @@ -9,20 +9,20 @@ description: "taosBenchmark (once called taosdemo ) is a tool for testing the pe taosBenchmark (formerly taosdemo) is a tool for testing the performance of TDengine products. taosBenchmark can test the performance of TDengine's insert, query, and subscription functions and simulate large amounts of data generated by many devices. taosBenchmark can be configured to generate user-defined databases, supertables, subtables, and the time-series data to populate these for performance benchmarking. taosBenchmark is highly configurable, and some of the configurations include the time interval for inserting data, the number of working threads, and the capability to insert disordered data. The installer provides taosdemo as a soft link to taosBenchmark for compatibility with past users. -**Please be noted that in the context of TDengine cloud service, non privileged user can't create database using any tool, including taosBenchmark. The database needs to be firstly created in the data explorer in TDengine cloud service console. For any content about creating database in this document, the user needs to ignore and create the database manually inside TDengine cloud service.** +:::note +Please note that in the context of the TDengine cloud service, a non-privileged user cannot create databases with any tool, including taosBenchmark. The database must first be created in the data explorer of the TDengine Cloud console. Wherever this document describes creating a database, ignore that step and create the database manually in TDengine Cloud instead. +::: ## Installation -To use taosBenchmark, you need to download and install [taosTools](https://www.taosdata.com/assets-download/3.0/taosTools-2.2.7-Linux-x64.tar.gz) or any later version of v2.2.7. Before installing taosTools, please firstly download and install [TDengine CLI](https://docs.tdengine.com/cloud/tools/cli/#installation). +There are two ways to install taosBenchmark: -Decompress the package and install. +- Install the official TDengine installer, which automatically installs taosBenchmark. Please refer to [TDengine installation](/operation/pkg-install) for details. + +- Compile taos-tools separately and install it. Please refer to the [taos-tools](https://github.com/taosdata/taos-tools) repository for details. -``` -tar -xzf taosTools-2.2.7-Linux-x64.tar.gz -cd taosTools-2.2.7-Linux-x64.tar.gz -sudo ./install-taostools.sh -``` ## Run + ### Configuration and running methods Run this command in your Linux terminal to save the cloud DSN as a variable: @@ -214,6 +214,10 @@ The parameters listed in this section apply to all function modes. `filetype` must be set to `insert` in the insertion scenario. See [General Configuration Parameters](#General Configuration Parameters) +- **keep_trying**: Keep trying to insert after a failure; the default is no retry. Available with v3.0.9+. + +- **trying_interval**: Specify the interval between insert retries. The valid value is a positive number. Only valid when keep_trying is enabled. Available with v3.0.9+. + #### Stream processing related configuration parameters The parameters for creating streams are configured in `stream` in the JSON configuration file, as shown below. 
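The stream block referenced above falls outside this hunk. As a rough, hypothetical sketch of how the new retry options fit into an insert-scenario JSON configuration (the surrounding field names follow the common taosBenchmark layout, and the exact value types accepted by `keep_trying` and `trying_interval` should be verified against your taosBenchmark version):

```json
{
  "filetype": "insert",
  "cfgdir": "/etc/taos",
  "thread_count": 4,
  "keep_trying": 3,
  "trying_interval": 1000,
  "databases": [
    {
      "dbinfo": { "name": "test", "drop": "no" },
      "super_tables": [
        {
          "name": "meters",
          "child_table_count": 100,
          "insert_rows": 10000,
          "columns": [{ "type": "FLOAT", "name": "current" }],
          "tags": [{ "type": "INT", "name": "groupid" }]
        }
      ]
    }
  ]
}
```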
diff --git a/docs/en/19-tools/06-taosdump.md b/docs/en/19-tools/06-taosdump.md index 1385197ee4655bf279048f40f8c4374aa9511892..11fc06e0a662c12a6674e195e8d574a8347c09fc 100644 --- a/docs/en/19-tools/06-taosdump.md +++ b/docs/en/19-tools/06-taosdump.md @@ -17,16 +17,13 @@ Users should not use taosdump to back up raw data, environment settings, hardwar ## Installation -To use taosdump, you need to download and install recent version of [taosTools](https://docs.tdengine.com/releases/tools/). Before installing taosTools, please firstly download and install the [TDengine client installation package](https://docs.tdengine.com/releases/tdengine/). +There are two ways to install taosdump: -Decompress the package and install. -``` -tar -xzf taosTools-2.2.7-Linux-x64.tar.gz -cd taosTools-2.2.7-Linux-x64.tar.gz -sudo ./install-taostools.sh -``` +- Install the taosTools official installer. Please find taosTools from [Release History](https://docs.taosdata.com/releases/tools/) page and download and install it. + +- Compile taos-tools separately and install it. Please refer to the [taos-tools](https://github.com/taosdata/taos-tools) repository for details. -Set environment variable. +Run the following command to set environment variable. ```bash export TDENGINE_CLOUD_DSN="" diff --git a/docs/en/22-user-management/01-orgs/index.md b/docs/en/22-user-management/01-orgs/index.md index ea4116ec453acaa3b5c7fa40f2c10bfab3cca94a..bed47538ca65e534ec4d71dd1559d494e117840d 100644 --- a/docs/en/22-user-management/01-orgs/index.md +++ b/docs/en/22-user-management/01-orgs/index.md @@ -4,7 +4,7 @@ title: Organization Management description: 'Organization management' --- -TDengine Cloud provides a list page for the user to manage his organizations. On this page, you can get all the organizations which you can have permission to view or edit. In each line of the organization list, you can get the name of the organization, roles which you have in the organization and the actions you can operate. +TDengine Cloud provides a list page for the user to manage his organizations. On this page, you can get all the organizations which you can have permission to view or edit. In each line of the organization list, you can get the name of the organization, roles which you have in the organization and the actions you can operate. ![Organization list](./images/orglist.webp) diff --git a/docs/en/22-user-management/index.md b/docs/en/22-user-management/index.md index 5fea58f7f2eb5e36460cd2ee63a393e4f8140441..10bdc67cf1786c6f27d2dc2a321270ce2470f0ab 100644 --- a/docs/en/22-user-management/index.md +++ b/docs/en/22-user-management/index.md @@ -19,10 +19,10 @@ The major features are listed below: 1. [Organization Management](./orgs/): Create new organizations, update their name and also can transfer the owner to some one in the organization. 2. [User Mgmt](./users/): Create, update or delete users or user groups. You can also create/edit/delete customized roles. - [User](./users/users) -3. [Admin](./admin/): Create, update or delete users or user groups. You can also create/edit/delete customized roles. -4. [Database Access Control](./db/): Create, update or delete users or user groups. You can also create/edit/delete customized roles. 
+ -## User Stories + ```mdx-code-block import DocCardList from '@theme/DocCardList'; diff --git a/docs/examples/c/tmq.c b/docs/examples/c/tmq.c new file mode 100644 index 0000000000000000000000000000000000000000..eb41ad039a1852bb265165837d69edc3a2835684 --- /dev/null +++ b/docs/examples/c/tmq.c @@ -0,0 +1,282 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <assert.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <time.h> +#include "taos.h" + +static int running = 1; + +static int32_t msg_process(TAOS_RES* msg) { + char buf[1024]; + int32_t rows = 0; + + const char* topicName = tmq_get_topic_name(msg); + const char* dbName = tmq_get_db_name(msg); + int32_t vgroupId = tmq_get_vgroup_id(msg); + + printf("topic: %s\n", topicName); + printf("db: %s\n", dbName); + printf("vgroup id: %d\n", vgroupId); + + while (1) { + TAOS_ROW row = taos_fetch_row(msg); + if (row == NULL) break; + + TAOS_FIELD* fields = taos_fetch_fields(msg); + int32_t numOfFields = taos_field_count(msg); + // int32_t* length = taos_fetch_lengths(msg); + int32_t precision = taos_result_precision(msg); + rows++; + taos_print_row(buf, row, fields, numOfFields); + printf("precision: %d, row content: %s\n", precision, buf); + } + + return rows; +} + +static int32_t init_env() { + TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); + if (pConn == NULL) { + return -1; + } + + TAOS_RES* pRes; + // drop the topic and database if they already exist + printf("create database\n"); + pRes = taos_query(pConn, "drop topic topicname"); + if (taos_errno(pRes) != 0) { + printf("error in drop topic topicname, reason:%s\n", taos_errstr(pRes)); + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "drop database if exists tmqdb"); + if (taos_errno(pRes) != 0) { + printf("error in drop tmqdb, reason:%s\n", taos_errstr(pRes)); + } + taos_free_result(pRes); + + // create database + pRes = taos_query(pConn, "create database tmqdb precision 'ns'"); + if (taos_errno(pRes) != 0) { + printf("error in create tmqdb, reason:%s\n", taos_errstr(pRes)); + goto END; + } + taos_free_result(pRes); + + // create super table + printf("create super table\n"); + pRes = taos_query( + pConn, "create table tmqdb.stb (ts timestamp, c1 int, c2 float, c3 varchar(16)) tags(t1 int, t3 varchar(16))"); + if (taos_errno(pRes) != 0) { + printf("failed to create super table stb, reason:%s\n", taos_errstr(pRes)); + goto END; + } + taos_free_result(pRes); + + // create sub tables + printf("create sub tables\n"); + pRes = taos_query(pConn, "create table tmqdb.ctb0 using tmqdb.stb tags(0, 'subtable0')"); + if (taos_errno(pRes) != 0) { + printf("failed to create sub table ctb0, reason:%s\n", taos_errstr(pRes)); + goto END; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "create table tmqdb.ctb1 using tmqdb.stb tags(1, 'subtable1')"); + if (taos_errno(pRes) != 0) { + printf("failed to create sub table ctb1, reason:%s\n", taos_errstr(pRes)); + goto END; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "create table tmqdb.ctb2 using tmqdb.stb tags(2,
'subtable2')"); + if (taos_errno(pRes) != 0) { + printf("failed to create super table ctb2, reason:%s\n", taos_errstr(pRes)); + goto END; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "create table tmqdb.ctb3 using tmqdb.stb tags(3, 'subtable3')"); + if (taos_errno(pRes) != 0) { + printf("failed to create super table ctb3, reason:%s\n", taos_errstr(pRes)); + goto END; + } + taos_free_result(pRes); + + // insert data + printf("insert data into sub tables\n"); + pRes = taos_query(pConn, "insert into tmqdb.ctb0 values(now, 0, 0, 'a0')(now+1s, 0, 0, 'a00')"); + if (taos_errno(pRes) != 0) { + printf("failed to insert into ctb0, reason:%s\n", taos_errstr(pRes)); + goto END; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "insert into tmqdb.ctb1 values(now, 1, 1, 'a1')(now+1s, 11, 11, 'a11')"); + if (taos_errno(pRes) != 0) { + printf("failed to insert into ctb0, reason:%s\n", taos_errstr(pRes)); + goto END; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "insert into tmqdb.ctb2 values(now, 2, 2, 'a1')(now+1s, 22, 22, 'a22')"); + if (taos_errno(pRes) != 0) { + printf("failed to insert into ctb0, reason:%s\n", taos_errstr(pRes)); + goto END; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "insert into tmqdb.ctb3 values(now, 3, 3, 'a1')(now+1s, 33, 33, 'a33')"); + if (taos_errno(pRes) != 0) { + printf("failed to insert into ctb0, reason:%s\n", taos_errstr(pRes)); + goto END; + } + taos_free_result(pRes); + taos_close(pConn); + return 0; + +END: + taos_free_result(pRes); + taos_close(pConn); + return -1; +} + +int32_t create_topic() { + printf("create topic\n"); + TAOS_RES* pRes; + TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); + if (pConn == NULL) { + return -1; + } + + pRes = taos_query(pConn, "use tmqdb"); + if (taos_errno(pRes) != 0) { + printf("error in use tmqdb, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "create topic topicname as select ts, c1, c2, c3, tbname from tmqdb.stb where c1 > 1"); + if (taos_errno(pRes) != 0) { + printf("failed to create topic topicname, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + taos_close(pConn); + return 0; +} + +void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) { + printf("tmq_commit_cb_print() code: %d, tmq: %p, param: %p\n", code, tmq, param); +} + +tmq_t* build_consumer() { + tmq_conf_res_t code; + tmq_conf_t* conf = tmq_conf_new(); + code = tmq_conf_set(conf, "enable.auto.commit", "true"); + if (TMQ_CONF_OK != code) return NULL; + code = tmq_conf_set(conf, "auto.commit.interval.ms", "1000"); + if (TMQ_CONF_OK != code) return NULL; + code = tmq_conf_set(conf, "group.id", "cgrpName"); + if (TMQ_CONF_OK != code) return NULL; + code = tmq_conf_set(conf, "client.id", "user defined name"); + if (TMQ_CONF_OK != code) return NULL; + code = tmq_conf_set(conf, "td.connect.user", "root"); + if (TMQ_CONF_OK != code) return NULL; + code = tmq_conf_set(conf, "td.connect.pass", "taosdata"); + if (TMQ_CONF_OK != code) return NULL; + code = tmq_conf_set(conf, "auto.offset.reset", "earliest"); + if (TMQ_CONF_OK != code) return NULL; + code = tmq_conf_set(conf, "experimental.snapshot.enable", "false"); + if (TMQ_CONF_OK != code) return NULL; + + tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL); + + tmq_t* tmq = tmq_consumer_new(conf, NULL, 0); + tmq_conf_destroy(conf); + return tmq; +} + +tmq_list_t* build_topic_list() { + tmq_list_t* topicList = tmq_list_new(); + int32_t code = 
tmq_list_append(topicList, "topicname"); + if (code) { + tmq_list_destroy(topicList); + return NULL; + } + return topicList; +} + +void basic_consume_loop(tmq_t* tmq) { + int32_t totalRows = 0; + int32_t msgCnt = 0; + int32_t timeout = 5000; + while (running) { + TAOS_RES* tmqmsg = tmq_consumer_poll(tmq, timeout); + if (tmqmsg) { + msgCnt++; + totalRows += msg_process(tmqmsg); + taos_free_result(tmqmsg); + } else { + break; + } + } + + fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows); +} + +int main(int argc, char* argv[]) { + int32_t code; + + if (init_env() < 0) { + return -1; + } + + if (create_topic() < 0) { + return -1; + } + + tmq_t* tmq = build_consumer(); + if (NULL == tmq) { + fprintf(stderr, "build_consumer() fail!\n"); + return -1; + } + + tmq_list_t* topic_list = build_topic_list(); + if (NULL == topic_list) { + return -1; + } + + if ((code = tmq_subscribe(tmq, topic_list))) { + fprintf(stderr, "Failed to tmq_subscribe(): %s\n", tmq_err2str(code)); + } + tmq_list_destroy(topic_list); + + basic_consume_loop(tmq); + + code = tmq_consumer_close(tmq); + if (code) { + fprintf(stderr, "Failed to close consumer: %s\n", tmq_err2str(code)); + } else { + fprintf(stderr, "Consumer closed\n"); + } + + return 0; +} diff --git a/docs/examples/csharp/native-example/AsyncQueryExample.cs b/docs/examples/csharp/asyncQuery/Program.cs similarity index 81% rename from docs/examples/csharp/native-example/AsyncQueryExample.cs rename to docs/examples/csharp/asyncQuery/Program.cs index 0d47325932e2f01fec8d55cfdb64c636258f4a03..864f06a15e5d7c9fb8fcfb25c81915e3f2e13f9d 100644 --- a/docs/examples/csharp/native-example/AsyncQueryExample.cs +++ b/docs/examples/csharp/asyncQuery/Program.cs @@ -11,11 +11,17 @@ namespace TDengineExample static void Main() { IntPtr conn = GetConnection(); - QueryAsyncCallback queryAsyncCallback = new QueryAsyncCallback(QueryCallback); - TDengine.QueryAsync(conn, "select * from meters", queryAsyncCallback, IntPtr.Zero); - Thread.Sleep(2000); - TDengine.Close(conn); - TDengine.Cleanup(); + try + { + QueryAsyncCallback queryAsyncCallback = new QueryAsyncCallback(QueryCallback); + TDengine.QueryAsync(conn, "select * from meters", queryAsyncCallback, IntPtr.Zero); + Thread.Sleep(2000); + } + finally + { + TDengine.Close(conn); + } + } static void QueryCallback(IntPtr param, IntPtr taosRes, int code) @@ -27,11 +33,11 @@ namespace TDengineExample } else { - Console.WriteLine($"async query data failed, failed code {code}"); + throw new Exception($"async query data failed,code:{code},reason:{TDengine.Error(taosRes)}"); } } - // Iteratively call this interface until "numOfRows" is no greater than 0. + // Iteratively call this interface until "numOfRows" is no greater than 0. 
static void FetchRawBlockCallback(IntPtr param, IntPtr taosRes, int numOfRows) { if (numOfRows > 0) @@ -43,7 +49,7 @@ namespace TDengineExample for (int i = 0; i < dataList.Count; i++) { - if (i != 0 && (i+1) % metaList.Count == 0) + if (i != 0 && (i + 1) % metaList.Count == 0) { Console.WriteLine("{0}\t|", dataList[i]); } @@ -63,7 +69,7 @@ namespace TDengineExample } else { - Console.WriteLine($"FetchRawBlockCallback callback error, error code {numOfRows}"); + throw new Exception($"FetchRawBlockCallback callback error, error code {numOfRows}"); } TDengine.FreeResult(taosRes); } @@ -79,8 +85,7 @@ namespace TDengineExample var conn = TDengine.Connect(host, username, password, dbname, port); if (conn == IntPtr.Zero) { - Console.WriteLine("Connect to TDengine failed"); - Environment.Exit(0); + throw new Exception("Connect to TDengine failed"); } else { diff --git a/docs/examples/csharp/native-example/asyncquery.csproj b/docs/examples/csharp/asyncQuery/asyncquery.csproj similarity index 98% rename from docs/examples/csharp/native-example/asyncquery.csproj rename to docs/examples/csharp/asyncQuery/asyncquery.csproj index 045969edd7febbd11cc6577c8ba958669a5a7e3b..7c5b693f28dfa8832ae08bbaae9aa8a367951c70 100644 --- a/docs/examples/csharp/native-example/asyncquery.csproj +++ b/docs/examples/csharp/asyncQuery/asyncquery.csproj @@ -9,7 +9,7 @@ - + diff --git a/docs/examples/csharp/native-example/ConnectExample.cs b/docs/examples/csharp/connect/Program.cs similarity index 90% rename from docs/examples/csharp/native-example/ConnectExample.cs rename to docs/examples/csharp/connect/Program.cs index f3548ee65daab8a59695499339a8f89b0aa33a10..955db40c7c80e60350f9c0e8c6f50e7eb85246c2 100644 --- a/docs/examples/csharp/native-example/ConnectExample.cs +++ b/docs/examples/csharp/connect/Program.cs @@ -16,7 +16,7 @@ namespace TDengineExample var conn = TDengine.Connect(host, username, password, dbname, port); if (conn == IntPtr.Zero) { - Console.WriteLine("Connect to TDengine failed"); + throw new Exception("Connect to TDengine failed"); } else { diff --git a/docs/examples/csharp/native-example/connect.csproj b/docs/examples/csharp/connect/connect.csproj similarity index 98% rename from docs/examples/csharp/native-example/connect.csproj rename to docs/examples/csharp/connect/connect.csproj index 3a912f8987ace6ae540726886d901c8d32a7b81b..a08e86d4b42199be44a6551e37da11efb6e06a34 100644 --- a/docs/examples/csharp/native-example/connect.csproj +++ b/docs/examples/csharp/connect/connect.csproj @@ -9,7 +9,7 @@ - + diff --git a/docs/examples/csharp/csharp.sln b/docs/examples/csharp/csharp.sln new file mode 100644 index 0000000000000000000000000000000000000000..560dde55cbddd4e7928598e7dd940c2721bd7b9c --- /dev/null +++ b/docs/examples/csharp/csharp.sln @@ -0,0 +1,94 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio Version 16 +VisualStudioVersion = 16.0.30114.105 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "asyncquery", "asyncQuery\asyncquery.csproj", "{E2A5F00C-14E7-40E1-A2DE-6AB2975616D3}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "connect", "connect\connect.csproj", "{CCC5042D-93FC-4AE0-B2F6-7E692FD476B7}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "influxdbline", "influxdbLine\influxdbline.csproj", "{6A24FB80-1E3C-4E2D-A5AB-914FA583874D}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "optsJSON", "optsJSON\optsJSON.csproj", "{6725A961-0C66-4196-AC98-8D3F3D757D6C}" 
+EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "optstelnet", "optsTelnet\optstelnet.csproj", "{B3B50D25-688B-44D4-8683-482ABC52FFCA}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "query", "query\query.csproj", "{F2B7D13B-FE04-4C5C-BB6D-C12E0A9D9970}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "stmtinsert", "stmtInsert\stmtinsert.csproj", "{B40D6BED-BE3C-4B44-9B12-28BE441311BA}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "subscribe", "subscribe\subscribe.csproj", "{C3D45A8E-AFC0-4547-9F3C-467B0B583DED}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "wsConnect", "wsConnect\wsConnect.csproj", "{51E19494-845E-49ED-97C7-749AE63111BD}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "wsInsert", "wsInsert\wsInsert.csproj", "{13E2233B-4AFF-40D9-AF42-AB3F01617540}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "wsQuery", "wsQuery\wsQuery.csproj", "{0F394169-C456-442C-929D-C2D43A0EEC7B}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "wsStmt", "wsStmt\wsStmt.csproj", "{27B9C9AB-9055-4BF2-8A14-4E59F09D5985}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "sqlinsert", "sqlInsert\sqlinsert.csproj", "{CD24BD12-8550-4627-A11D-707B446F48C3}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Any CPU = Debug|Any CPU + Release|Any CPU = Release|Any CPU + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {E2A5F00C-14E7-40E1-A2DE-6AB2975616D3}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {E2A5F00C-14E7-40E1-A2DE-6AB2975616D3}.Debug|Any CPU.Build.0 = Debug|Any CPU + {E2A5F00C-14E7-40E1-A2DE-6AB2975616D3}.Release|Any CPU.ActiveCfg = Release|Any CPU + {E2A5F00C-14E7-40E1-A2DE-6AB2975616D3}.Release|Any CPU.Build.0 = Release|Any CPU + {CCC5042D-93FC-4AE0-B2F6-7E692FD476B7}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {CCC5042D-93FC-4AE0-B2F6-7E692FD476B7}.Debug|Any CPU.Build.0 = Debug|Any CPU + {CCC5042D-93FC-4AE0-B2F6-7E692FD476B7}.Release|Any CPU.ActiveCfg = Release|Any CPU + {CCC5042D-93FC-4AE0-B2F6-7E692FD476B7}.Release|Any CPU.Build.0 = Release|Any CPU + {6A24FB80-1E3C-4E2D-A5AB-914FA583874D}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {6A24FB80-1E3C-4E2D-A5AB-914FA583874D}.Debug|Any CPU.Build.0 = Debug|Any CPU + {6A24FB80-1E3C-4E2D-A5AB-914FA583874D}.Release|Any CPU.ActiveCfg = Release|Any CPU + {6A24FB80-1E3C-4E2D-A5AB-914FA583874D}.Release|Any CPU.Build.0 = Release|Any CPU + {6725A961-0C66-4196-AC98-8D3F3D757D6C}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {6725A961-0C66-4196-AC98-8D3F3D757D6C}.Debug|Any CPU.Build.0 = Debug|Any CPU + {6725A961-0C66-4196-AC98-8D3F3D757D6C}.Release|Any CPU.ActiveCfg = Release|Any CPU + {6725A961-0C66-4196-AC98-8D3F3D757D6C}.Release|Any CPU.Build.0 = Release|Any CPU + {B3B50D25-688B-44D4-8683-482ABC52FFCA}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {B3B50D25-688B-44D4-8683-482ABC52FFCA}.Debug|Any CPU.Build.0 = Debug|Any CPU + {B3B50D25-688B-44D4-8683-482ABC52FFCA}.Release|Any CPU.ActiveCfg = Release|Any CPU + {B3B50D25-688B-44D4-8683-482ABC52FFCA}.Release|Any CPU.Build.0 = Release|Any CPU + {F2B7D13B-FE04-4C5C-BB6D-C12E0A9D9970}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {F2B7D13B-FE04-4C5C-BB6D-C12E0A9D9970}.Debug|Any CPU.Build.0 = Debug|Any CPU + {F2B7D13B-FE04-4C5C-BB6D-C12E0A9D9970}.Release|Any CPU.ActiveCfg = Release|Any CPU + 
{F2B7D13B-FE04-4C5C-BB6D-C12E0A9D9970}.Release|Any CPU.Build.0 = Release|Any CPU + {B40D6BED-BE3C-4B44-9B12-28BE441311BA}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {B40D6BED-BE3C-4B44-9B12-28BE441311BA}.Debug|Any CPU.Build.0 = Debug|Any CPU + {B40D6BED-BE3C-4B44-9B12-28BE441311BA}.Release|Any CPU.ActiveCfg = Release|Any CPU + {B40D6BED-BE3C-4B44-9B12-28BE441311BA}.Release|Any CPU.Build.0 = Release|Any CPU + {C3D45A8E-AFC0-4547-9F3C-467B0B583DED}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {C3D45A8E-AFC0-4547-9F3C-467B0B583DED}.Debug|Any CPU.Build.0 = Debug|Any CPU + {C3D45A8E-AFC0-4547-9F3C-467B0B583DED}.Release|Any CPU.ActiveCfg = Release|Any CPU + {C3D45A8E-AFC0-4547-9F3C-467B0B583DED}.Release|Any CPU.Build.0 = Release|Any CPU + {51E19494-845E-49ED-97C7-749AE63111BD}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {51E19494-845E-49ED-97C7-749AE63111BD}.Debug|Any CPU.Build.0 = Debug|Any CPU + {51E19494-845E-49ED-97C7-749AE63111BD}.Release|Any CPU.ActiveCfg = Release|Any CPU + {51E19494-845E-49ED-97C7-749AE63111BD}.Release|Any CPU.Build.0 = Release|Any CPU + {13E2233B-4AFF-40D9-AF42-AB3F01617540}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {13E2233B-4AFF-40D9-AF42-AB3F01617540}.Debug|Any CPU.Build.0 = Debug|Any CPU + {13E2233B-4AFF-40D9-AF42-AB3F01617540}.Release|Any CPU.ActiveCfg = Release|Any CPU + {13E2233B-4AFF-40D9-AF42-AB3F01617540}.Release|Any CPU.Build.0 = Release|Any CPU + {0F394169-C456-442C-929D-C2D43A0EEC7B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {0F394169-C456-442C-929D-C2D43A0EEC7B}.Debug|Any CPU.Build.0 = Debug|Any CPU + {0F394169-C456-442C-929D-C2D43A0EEC7B}.Release|Any CPU.ActiveCfg = Release|Any CPU + {0F394169-C456-442C-929D-C2D43A0EEC7B}.Release|Any CPU.Build.0 = Release|Any CPU + {27B9C9AB-9055-4BF2-8A14-4E59F09D5985}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {27B9C9AB-9055-4BF2-8A14-4E59F09D5985}.Debug|Any CPU.Build.0 = Debug|Any CPU + {27B9C9AB-9055-4BF2-8A14-4E59F09D5985}.Release|Any CPU.ActiveCfg = Release|Any CPU + {27B9C9AB-9055-4BF2-8A14-4E59F09D5985}.Release|Any CPU.Build.0 = Release|Any CPU + {CD24BD12-8550-4627-A11D-707B446F48C3}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {CD24BD12-8550-4627-A11D-707B446F48C3}.Debug|Any CPU.Build.0 = Debug|Any CPU + {CD24BD12-8550-4627-A11D-707B446F48C3}.Release|Any CPU.ActiveCfg = Release|Any CPU + {CD24BD12-8550-4627-A11D-707B446F48C3}.Release|Any CPU.Build.0 = Release|Any CPU + EndGlobalSection +EndGlobal diff --git a/docs/examples/csharp/native-example/InfluxDBLineExample.cs b/docs/examples/csharp/influxdbLine/Program.cs similarity index 73% rename from docs/examples/csharp/native-example/InfluxDBLineExample.cs rename to docs/examples/csharp/influxdbLine/Program.cs index 7b4453f4ac0b14dd76d166e395bdacb46a5d3fbc..fa3cb21fe04977b5081c922d623dee5514056770 100644 --- a/docs/examples/csharp/native-example/InfluxDBLineExample.cs +++ b/docs/examples/csharp/influxdbLine/Program.cs @@ -17,8 +17,7 @@ namespace TDengineExample IntPtr res = TDengine.SchemalessInsert(conn, lines, lines.Length, (int)TDengineSchemalessProtocol.TSDB_SML_LINE_PROTOCOL, (int)TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_MILLI_SECONDS); if (TDengine.ErrorNo(res) != 0) { - Console.WriteLine("SchemalessInsert failed since " + TDengine.Error(res)); - ExitProgram(conn, 1); + throw new Exception("SchemalessInsert failed since " + TDengine.Error(res)); } else { @@ -26,7 +25,6 @@ namespace TDengineExample Console.WriteLine($"SchemalessInsert success, affected {affectedRows} rows"); } TDengine.FreeResult(res); - ExitProgram(conn, 0); } static IntPtr GetConnection() @@ 
-39,9 +37,7 @@ namespace TDengineExample var conn = TDengine.Connect(host, username, password, dbname, port); if (conn == IntPtr.Zero) { - Console.WriteLine("Connect to TDengine failed"); - TDengine.Cleanup(); - Environment.Exit(1); + throw new Exception("Connect to TDengine failed"); } else { @@ -55,23 +51,15 @@ namespace TDengineExample IntPtr res = TDengine.Query(conn, "CREATE DATABASE test"); if (TDengine.ErrorNo(res) != 0) { - Console.WriteLine("failed to create database, reason: " + TDengine.Error(res)); - ExitProgram(conn, 1); + throw new Exception("failed to create database, reason: " + TDengine.Error(res)); } res = TDengine.Query(conn, "USE test"); if (TDengine.ErrorNo(res) != 0) { - Console.WriteLine("failed to change database, reason: " + TDengine.Error(res)); - ExitProgram(conn, 1); + throw new Exception("failed to change database, reason: " + TDengine.Error(res)); } } - static void ExitProgram(IntPtr conn, int exitCode) - { - TDengine.Close(conn); - TDengine.Cleanup(); - Environment.Exit(exitCode); - } } } diff --git a/docs/examples/csharp/native-example/influxdbline.csproj b/docs/examples/csharp/influxdbLine/influxdbline.csproj similarity index 98% rename from docs/examples/csharp/native-example/influxdbline.csproj rename to docs/examples/csharp/influxdbLine/influxdbline.csproj index 58bca485088e409fe1d387c6020418bbc2bf871b..4889f8fde9dc0eb75c0547e32355929d1cceb138 100644 --- a/docs/examples/csharp/native-example/influxdbline.csproj +++ b/docs/examples/csharp/influxdbLine/influxdbline.csproj @@ -9,7 +9,7 @@ - + diff --git a/docs/examples/csharp/native-example/QueryExample.cs b/docs/examples/csharp/native-example/QueryExample.cs deleted file mode 100644 index d75bb8d6611f5b3899485eb1a63a42ed6995847d..0000000000000000000000000000000000000000 --- a/docs/examples/csharp/native-example/QueryExample.cs +++ /dev/null @@ -1,82 +0,0 @@ -using TDengineDriver; -using TDengineDriver.Impl; -using System.Runtime.InteropServices; - -namespace TDengineExample -{ - internal class QueryExample - { - static void Main() - { - IntPtr conn = GetConnection(); - // run query - IntPtr res = TDengine.Query(conn, "SELECT * FROM meters LIMIT 2"); - if (TDengine.ErrorNo(res) != 0) - { - Console.WriteLine("Failed to query since: " + TDengine.Error(res)); - TDengine.Close(conn); - TDengine.Cleanup(); - return; - } - - // get filed count - int fieldCount = TDengine.FieldCount(res); - Console.WriteLine("fieldCount=" + fieldCount); - - // print column names - List metas = LibTaos.GetMeta(res); - for (int i = 0; i < metas.Count; i++) - { - Console.Write(metas[i].name + "\t"); - } - Console.WriteLine(); - - // print values - List resData = LibTaos.GetData(res); - for (int i = 0; i < resData.Count; i++) - { - Console.Write($"|{resData[i].ToString()} \t"); - if (((i + 1) % metas.Count == 0)) - { - Console.WriteLine(""); - } - } - Console.WriteLine(); - - if (TDengine.ErrorNo(res) != 0) - { - Console.WriteLine($"Query is not complete, Error {TDengine.ErrorNo(res)} {TDengine.Error(res)}"); - } - // exit - TDengine.FreeResult(res); - TDengine.Close(conn); - TDengine.Cleanup(); - } - static IntPtr GetConnection() - { - string host = "localhost"; - short port = 6030; - string username = "root"; - string password = "taosdata"; - string dbname = "power"; - var conn = TDengine.Connect(host, username, password, dbname, port); - if (conn == IntPtr.Zero) - { - Console.WriteLine("Connect to TDengine failed"); - System.Environment.Exit(0); - } - else - { - Console.WriteLine("Connect to TDengine success"); - } - return conn; - 
} - } -} - -// output: -// Connect to TDengine success -// fieldCount=6 -// ts current voltage phase location groupid -// 1648432611249 10.3 219 0.31 California.SanFrancisco 2 -// 1648432611749 12.6 218 0.33 California.SanFrancisco 2 \ No newline at end of file diff --git a/docs/examples/csharp/native-example/SQLInsertExample.cs b/docs/examples/csharp/native-example/SQLInsertExample.cs deleted file mode 100644 index 192ea96d5713bbf7f37f2208687c41e3e66d473b..0000000000000000000000000000000000000000 --- a/docs/examples/csharp/native-example/SQLInsertExample.cs +++ /dev/null @@ -1,69 +0,0 @@ -using TDengineDriver; - - -namespace TDengineExample -{ - internal class SQLInsertExample - { - - static void Main() - { - IntPtr conn = GetConnection(); - IntPtr res = TDengine.Query(conn, "CREATE DATABASE power"); - CheckRes(conn, res, "failed to create database"); - res = TDengine.Query(conn, "USE power"); - CheckRes(conn, res, "failed to change database"); - res = TDengine.Query(conn, "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)"); - CheckRes(conn, res, "failed to create stable"); - var sql = "INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) " + - "d1002 USING power.meters TAGS('California.SanFrancisco', 3) VALUES('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) " + - "d1003 USING power.meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000)('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) " + - "d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000)('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)"; - res = TDengine.Query(conn, sql); - CheckRes(conn, res, "failed to insert data"); - int affectedRows = TDengine.AffectRows(res); - Console.WriteLine("affectedRows " + affectedRows); - ExitProgram(conn, 0); - } - - static IntPtr GetConnection() - { - string host = "localhost"; - short port = 6030; - string username = "root"; - string password = "taosdata"; - string dbname = ""; - var conn = TDengine.Connect(host, username, password, dbname, port); - if (conn == IntPtr.Zero) - { - Console.WriteLine("Connect to TDengine failed"); - Environment.Exit(0); - } - else - { - Console.WriteLine("Connect to TDengine success"); - } - return conn; - } - - static void CheckRes(IntPtr conn, IntPtr res, String errorMsg) - { - if (TDengine.ErrorNo(res) != 0) - { - Console.Write(errorMsg + " since: " + TDengine.Error(res)); - ExitProgram(conn, 1); - } - } - - static void ExitProgram(IntPtr conn, int exitCode) - { - TDengine.Close(conn); - TDengine.Cleanup(); - Environment.Exit(exitCode); - } - } -} - -// output: -// Connect to TDengine success -// affectedRows 8 diff --git a/docs/examples/csharp/native-example/SubscribeDemo.cs b/docs/examples/csharp/native-example/SubscribeDemo.cs deleted file mode 100644 index b62ff12e5ea38eb27ae5de8e8027aa41b1873d23..0000000000000000000000000000000000000000 --- a/docs/examples/csharp/native-example/SubscribeDemo.cs +++ /dev/null @@ -1,100 +0,0 @@ -using System; -using TDengineTMQ; -using TDengineDriver; -using System.Runtime.InteropServices; - -namespace TMQExample -{ - internal class SubscribeDemo - { - static void Main(string[] args) - { - IntPtr conn = GetConnection(); - string topic = "topic_example"; - 
Console.WriteLine($"create topic if not exist {topic} as select * from meters"); - //create topic - IntPtr res = TDengine.Query(conn, $"create topic if not exists {topic} as select * from meters"); - - if (res == IntPtr.Zero) - { - throw new Exception($"create topic failed, reason:{TDengine.Error(res)}"); - } - - var cfg = new ConsumerConfig - { - GourpId = "group_1", - TDConnectUser = "root", - TDConnectPasswd = "taosdata", - MsgWithTableName = "true", - TDConnectIp = "127.0.0.1", - }; - - // create consumer - var consumer = new ConsumerBuilder(cfg) - .Build(); - - // subscribe - consumer.Subscribe(topic); - - // consume - for (int i = 0; i < 5; i++) - { - var consumeRes = consumer.Consume(300); - // print consumeResult - foreach (KeyValuePair kv in consumeRes.Message) - { - Console.WriteLine("topic partitions:\n{0}", kv.Key.ToString()); - - kv.Value.Metas.ForEach(meta => - { - Console.Write("{0} {1}({2}) \t|", meta.name, meta.TypeName(), meta.size); - }); - Console.WriteLine(""); - kv.Value.Datas.ForEach(data => - { - Console.WriteLine(data.ToString()); - }); - } - - consumer.Commit(consumeRes); - Console.WriteLine("\n================ {0} done ", i); - - } - - // retrieve topic list - List topics = consumer.Subscription(); - topics.ForEach(t => Console.WriteLine("topic name:{0}", t)); - - - // unsubscribe - consumer.Unsubscribe(); - - // close consumer after use.Otherwise will lead memory leak. - consumer.Close(); - TDengine.Close(conn); - - - } - - static IntPtr GetConnection() - { - string host = "localhost"; - short port = 6030; - string username = "root"; - string password = "taosdata"; - string dbname = "power"; - var conn = TDengine.Connect(host, username, password, dbname, port); - if (conn == IntPtr.Zero) - { - Console.WriteLine("Connect to TDengine failed"); - System.Environment.Exit(0); - } - else - { - Console.WriteLine("Connect to TDengine success"); - } - return conn; - } - } - -} diff --git a/docs/examples/csharp/native-example/subscribe.csproj b/docs/examples/csharp/native-example/subscribe.csproj deleted file mode 100644 index eff29b3bf42bde521aae70bfd1ed555ac72bfce9..0000000000000000000000000000000000000000 --- a/docs/examples/csharp/native-example/subscribe.csproj +++ /dev/null @@ -1,15 +0,0 @@ - - - - Exe - net6.0 - enable - enable - TMQExample.SubscribeDemo - - - - - - - diff --git a/docs/examples/csharp/native-example/OptsJsonExample.cs b/docs/examples/csharp/optsJSON/Program.cs similarity index 53% rename from docs/examples/csharp/native-example/OptsJsonExample.cs rename to docs/examples/csharp/optsJSON/Program.cs index 2c41acc5c9628befda7eb4ad5c30af5b921de948..b67b5af62bf0a1fd9028125da0b665f723f2e4ec 100644 --- a/docs/examples/csharp/native-example/OptsJsonExample.cs +++ b/docs/examples/csharp/optsJSON/Program.cs @@ -7,27 +7,31 @@ namespace TDengineExample static void Main() { IntPtr conn = GetConnection(); - PrepareDatabase(conn); - string[] lines = { "[{\"metric\": \"meters.current\", \"timestamp\": 1648432611249, \"value\": 10.3, \"tags\": {\"location\": \"California.SanFrancisco\", \"groupid\": 2}}," + + try + { + PrepareDatabase(conn); + string[] lines = { "[{\"metric\": \"meters.current\", \"timestamp\": 1648432611249, \"value\": 10.3, \"tags\": {\"location\": \"California.SanFrancisco\", \"groupid\": 2}}," + " {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611249, \"value\": 219, \"tags\": {\"location\": \"California.LosAngeles\", \"groupid\": 1}}, " + "{\"metric\": \"meters.current\", \"timestamp\": 1648432611250, \"value\": 12.6, \"tags\": 
{\"location\": \"California.SanFrancisco\", \"groupid\": 2}}," + " {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611250, \"value\": 221, \"tags\": {\"location\": \"California.LosAngeles\", \"groupid\": 1}}]" }; - IntPtr res = TDengine.SchemalessInsert(conn, lines, 1, (int)TDengineSchemalessProtocol.TSDB_SML_JSON_PROTOCOL, (int)TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_NOT_CONFIGURED); - if (TDengine.ErrorNo(res) != 0) - { - Console.WriteLine("SchemalessInsert failed since " + TDengine.Error(res)); - ExitProgram(conn, 1); + IntPtr res = TDengine.SchemalessInsert(conn, lines, 1, (int)TDengineSchemalessProtocol.TSDB_SML_JSON_PROTOCOL, (int)TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_NOT_CONFIGURED); + if (TDengine.ErrorNo(res) != 0) + { + throw new Exception("SchemalessInsert failed since " + TDengine.Error(res)); + } + else + { + int affectedRows = TDengine.AffectRows(res); + Console.WriteLine($"SchemalessInsert success, affected {affectedRows} rows"); + } + TDengine.FreeResult(res); } - else + finally { - int affectedRows = TDengine.AffectRows(res); - Console.WriteLine($"SchemalessInsert success, affected {affectedRows} rows"); + TDengine.Close(conn); } - TDengine.FreeResult(res); - ExitProgram(conn, 0); - } static IntPtr GetConnection() { @@ -39,9 +43,7 @@ namespace TDengineExample var conn = TDengine.Connect(host, username, password, dbname, port); if (conn == IntPtr.Zero) { - Console.WriteLine("Connect to TDengine failed"); - TDengine.Cleanup(); - Environment.Exit(1); + throw new Exception("Connect to TDengine failed"); } else { @@ -55,22 +57,13 @@ namespace TDengineExample IntPtr res = TDengine.Query(conn, "CREATE DATABASE test"); if (TDengine.ErrorNo(res) != 0) { - Console.WriteLine("failed to create database, reason: " + TDengine.Error(res)); - ExitProgram(conn, 1); + throw new Exception("failed to create database, reason: " + TDengine.Error(res)); } res = TDengine.Query(conn, "USE test"); if (TDengine.ErrorNo(res) != 0) { - Console.WriteLine("failed to change database, reason: " + TDengine.Error(res)); - ExitProgram(conn, 1); + throw new Exception("failed to change database, reason: " + TDengine.Error(res)); } } - - static void ExitProgram(IntPtr conn, int exitCode) - { - TDengine.Close(conn); - TDengine.Cleanup(); - Environment.Exit(exitCode); - } } } diff --git a/docs/examples/csharp/native-example/optsjson.csproj b/docs/examples/csharp/optsJSON/optsJSON.csproj similarity index 98% rename from docs/examples/csharp/native-example/optsjson.csproj rename to docs/examples/csharp/optsJSON/optsJSON.csproj index da16025dcd45f8e5c4ba6e242524c2e56191e93c..208f04c82d19f83f2746871b64a6dfdf0dcf3eae 100644 --- a/docs/examples/csharp/native-example/optsjson.csproj +++ b/docs/examples/csharp/optsJSON/optsJSON.csproj @@ -9,7 +9,7 @@ - + diff --git a/docs/examples/csharp/native-example/OptsTelnetExample.cs b/docs/examples/csharp/optsTelnet/Program.cs similarity index 59% rename from docs/examples/csharp/native-example/OptsTelnetExample.cs rename to docs/examples/csharp/optsTelnet/Program.cs index bb752db1afbbb2ef68df9ca25314c8b91cd9a266..e73ceb041accf88222176342d46fe1a669584211 100644 --- a/docs/examples/csharp/native-example/OptsTelnetExample.cs +++ b/docs/examples/csharp/optsTelnet/Program.cs @@ -7,8 +7,10 @@ namespace TDengineExample static void Main() { IntPtr conn = GetConnection(); - PrepareDatabase(conn); - string[] lines = { + try + { + PrepareDatabase(conn); + string[] lines = { "meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2", "meters.current 
1648432611250 12.6 location=California.SanFrancisco groupid=2", "meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3", @@ -18,20 +20,22 @@ namespace TDengineExample "meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3", "meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3", }; - IntPtr res = TDengine.SchemalessInsert(conn, lines, lines.Length, (int)TDengineSchemalessProtocol.TSDB_SML_TELNET_PROTOCOL, (int)TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_NOT_CONFIGURED); - if (TDengine.ErrorNo(res) != 0) - { - Console.WriteLine("SchemalessInsert failed since " + TDengine.Error(res)); - ExitProgram(conn, 1); + IntPtr res = TDengine.SchemalessInsert(conn, lines, lines.Length, (int)TDengineSchemalessProtocol.TSDB_SML_TELNET_PROTOCOL, (int)TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_NOT_CONFIGURED); + if (TDengine.ErrorNo(res) != 0) + { + throw new Exception("SchemalessInsert failed since " + TDengine.Error(res)); + } + else + { + int affectedRows = TDengine.AffectRows(res); + Console.WriteLine($"SchemalessInsert success, affected {affectedRows} rows"); + } + TDengine.FreeResult(res); } - else + finally { - int affectedRows = TDengine.AffectRows(res); - Console.WriteLine($"SchemalessInsert success, affected {affectedRows} rows"); + TDengine.Close(conn); } - TDengine.FreeResult(res); - ExitProgram(conn, 0); - } static IntPtr GetConnection() { @@ -43,9 +47,7 @@ namespace TDengineExample var conn = TDengine.Connect(host, username, password, dbname, port); if (conn == IntPtr.Zero) { - Console.WriteLine("Connect to TDengine failed"); - TDengine.Cleanup(); - Environment.Exit(1); + throw new Exception("Connect to TDengine failed"); } else { @@ -59,22 +61,13 @@ namespace TDengineExample IntPtr res = TDengine.Query(conn, "CREATE DATABASE test"); if (TDengine.ErrorNo(res) != 0) { - Console.WriteLine("failed to create database, reason: " + TDengine.Error(res)); - ExitProgram(conn, 1); + throw new Exception("failed to create database, reason: " + TDengine.Error(res)); } res = TDengine.Query(conn, "USE test"); if (TDengine.ErrorNo(res) != 0) { - Console.WriteLine("failed to change database, reason: " + TDengine.Error(res)); - ExitProgram(conn, 1); + throw new Exception("failed to change database, reason: " + TDengine.Error(res)); } } - - static void ExitProgram(IntPtr conn, int exitCode) - { - TDengine.Close(conn); - TDengine.Cleanup(); - Environment.Exit(exitCode); - } } } diff --git a/docs/examples/csharp/native-example/optstelnet.csproj b/docs/examples/csharp/optsTelnet/optstelnet.csproj similarity index 98% rename from docs/examples/csharp/native-example/optstelnet.csproj rename to docs/examples/csharp/optsTelnet/optstelnet.csproj index 194de21bcc74653a2267b29681ece6243fd401fc..32c76ec4184b82e943897a36bc3bcbbd9ec85149 100644 --- a/docs/examples/csharp/native-example/optstelnet.csproj +++ b/docs/examples/csharp/optsTelnet/optstelnet.csproj @@ -9,7 +9,7 @@ - + diff --git a/docs/examples/csharp/query/Program.cs b/docs/examples/csharp/query/Program.cs new file mode 100644 index 0000000000000000000000000000000000000000..84c7f9db1f8a87289b73662c72c63c0078b45678 --- /dev/null +++ b/docs/examples/csharp/query/Program.cs @@ -0,0 +1,80 @@ +using TDengineDriver; +using TDengineDriver.Impl; +using System.Runtime.InteropServices; + +namespace TDengineExample +{ + internal class QueryExample + { + static void Main() + { + IntPtr conn = GetConnection(); + try + { + // run query + IntPtr res = TDengine.Query(conn, "SELECT * FROM meters LIMIT 2"); + if
(TDengine.ErrorNo(res) != 0) + { + throw new Exception("Failed to query since: " + TDengine.Error(res)); + } + + // get field count + int fieldCount = TDengine.FieldCount(res); + Console.WriteLine("fieldCount=" + fieldCount); + + // print column names + List<TDengineMeta> metas = LibTaos.GetMeta(res); + for (int i = 0; i < metas.Count; i++) + { + Console.Write(metas[i].name + "\t"); + } + Console.WriteLine(); + + // print values + List<Object> resData = LibTaos.GetData(res); + for (int i = 0; i < resData.Count; i++) + { + Console.Write($"|{resData[i].ToString()} \t"); + if (((i + 1) % metas.Count == 0)) + { + Console.WriteLine(""); + } + } + Console.WriteLine(); + + // Free result after use + TDengine.FreeResult(res); + } + finally + { + TDengine.Close(conn); + } + + } + static IntPtr GetConnection() + { + string host = "localhost"; + short port = 6030; + string username = "root"; + string password = "taosdata"; + string dbname = "power"; + var conn = TDengine.Connect(host, username, password, dbname, port); + if (conn == IntPtr.Zero) + { + throw new Exception("Connect to TDengine failed"); + } + else + { + Console.WriteLine("Connect to TDengine success"); + } + return conn; + } + } +} + +// output: +// Connect to TDengine success +// fieldCount=6 +// ts current voltage phase location groupid +// 1648432611249 10.3 219 0.31 California.SanFrancisco 2 +// 1648432611749 12.6 218 0.33 California.SanFrancisco 2 \ No newline at end of file diff --git a/docs/examples/csharp/native-example/query.csproj b/docs/examples/csharp/query/query.csproj similarity index 98% rename from docs/examples/csharp/native-example/query.csproj rename to docs/examples/csharp/query/query.csproj index 39fc135d5ab9f5a8397b412e2307a2306abd4f2a..360d73b2c096ef86df59876d0629fd0c4b6a239b 100644 --- a/docs/examples/csharp/native-example/query.csproj +++ b/docs/examples/csharp/query/query.csproj @@ -9,7 +9,7 @@ - + diff --git a/docs/examples/csharp/sqlInsert/Program.cs b/docs/examples/csharp/sqlInsert/Program.cs new file mode 100644 index 0000000000000000000000000000000000000000..f23a6e1663023d1d2fafb3e92e0b605f8ac55e52 --- /dev/null +++ b/docs/examples/csharp/sqlInsert/Program.cs @@ -0,0 +1,69 @@ +using TDengineDriver; + + +namespace TDengineExample +{ + internal class SQLInsertExample + { + + static void Main() + { + IntPtr conn = GetConnection(); + try + { + IntPtr res = TDengine.Query(conn, "CREATE DATABASE power"); + CheckRes(conn, res, "failed to create database"); + res = TDengine.Query(conn, "USE power"); + CheckRes(conn, res, "failed to change database"); + res = TDengine.Query(conn, "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)"); + CheckRes(conn, res, "failed to create stable"); + var sql = "INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) " + + "d1002 USING power.meters TAGS('California.SanFrancisco', 3) VALUES('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) " + + "d1003 USING power.meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000)('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) " + + "d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000)('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)"; + res = TDengine.Query(conn, sql); + CheckRes(conn, res, "failed to insert
data"); + int affectedRows = TDengine.AffectRows(res); + Console.WriteLine("affectedRows " + affectedRows); + TDengine.FreeResult(res); + } + finally + { + TDengine.Close(conn); + } + + } + + static IntPtr GetConnection() + { + string host = "localhost"; + short port = 6030; + string username = "root"; + string password = "taosdata"; + string dbname = ""; + var conn = TDengine.Connect(host, username, password, dbname, port); + if (conn == IntPtr.Zero) + { + throw new Exception("Connect to TDengine failed"); + } + else + { + Console.WriteLine("Connect to TDengine success"); + } + return conn; + } + + static void CheckRes(IntPtr conn, IntPtr res, String errorMsg) + { + if (TDengine.ErrorNo(res) != 0) + { + throw new Exception($"{errorMsg} since: {TDengine.Error(res)}"); + } + } + + } +} + +// output: +// Connect to TDengine success +// affectedRows 8 diff --git a/docs/examples/csharp/native-example/sqlinsert.csproj b/docs/examples/csharp/sqlInsert/sqlinsert.csproj similarity index 98% rename from docs/examples/csharp/native-example/sqlinsert.csproj rename to docs/examples/csharp/sqlInsert/sqlinsert.csproj index ab0e5e717a78faad07c949b434b0d0b8a26c7211..1b6f745c82437e9796da4c48fc720600dbe99cb5 100644 --- a/docs/examples/csharp/native-example/sqlinsert.csproj +++ b/docs/examples/csharp/sqlInsert/sqlinsert.csproj @@ -9,7 +9,7 @@ - + diff --git a/docs/examples/csharp/native-example/StmtInsertExample.cs b/docs/examples/csharp/stmtInsert/Program.cs similarity index 52% rename from docs/examples/csharp/native-example/StmtInsertExample.cs rename to docs/examples/csharp/stmtInsert/Program.cs index 41a557ebd4b3371420f705f99210f0149dc95582..80cadb2ff8b596a0484d05ff15aeaa50f22ff859 100644 --- a/docs/examples/csharp/native-example/StmtInsertExample.cs +++ b/docs/examples/csharp/stmtInsert/Program.cs @@ -9,45 +9,50 @@ namespace TDengineExample static void Main() { conn = GetConnection(); - PrepareSTable(); - // 1. init and prepare - stmt = TDengine.StmtInit(conn); - if (stmt == IntPtr.Zero) + try { - Console.WriteLine("failed to init stmt, " + TDengine.Error(stmt)); - ExitProgram(); - } - int res = TDengine.StmtPrepare(stmt, "INSERT INTO ? USING meters TAGS(?, ?) VALUES(?, ?, ?, ?)"); - CheckStmtRes(res, "failed to prepare stmt"); + PrepareSTable(); + // 1. init and prepare + stmt = TDengine.StmtInit(conn); + if (stmt == IntPtr.Zero) + { + throw new Exception("failed to init stmt."); + } + int res = TDengine.StmtPrepare(stmt, "INSERT INTO ? USING meters TAGS(?, ?) VALUES(?, ?, ?, ?)"); + CheckStmtRes(res, "failed to prepare stmt"); - // 2. bind table name and tags - TAOS_MULTI_BIND[] tags = new TAOS_MULTI_BIND[2] { TaosMultiBind.MultiBindBinary(new string[]{"California.SanFrancisco"}), TaosMultiBind.MultiBindInt(new int?[] {2}) }; - res = TDengine.StmtSetTbnameTags(stmt, "d1001", tags); - CheckStmtRes(res, "failed to bind table name and tags"); + // 2. bind table name and tags + TAOS_MULTI_BIND[] tags = new TAOS_MULTI_BIND[2] { TaosMultiBind.MultiBindBinary(new string[] { "California.SanFrancisco" }), TaosMultiBind.MultiBindInt(new int?[] { 2 }) }; + res = TDengine.StmtSetTbnameTags(stmt, "d1001", tags); + CheckStmtRes(res, "failed to bind table name and tags"); - // 3. bind values - TAOS_MULTI_BIND[] values = new TAOS_MULTI_BIND[4] { + // 3. 
bind values + TAOS_MULTI_BIND[] values = new TAOS_MULTI_BIND[4] { TaosMultiBind.MultiBindTimestamp(new long[2] { 1648432611249, 1648432611749}), TaosMultiBind.MultiBindFloat(new float?[2] { 10.3f, 12.6f}), TaosMultiBind.MultiBindInt(new int?[2] { 219, 218}), TaosMultiBind.MultiBindFloat(new float?[2]{ 0.31f, 0.33f}) }; - res = TDengine.StmtBindParamBatch(stmt, values); - CheckStmtRes(res, "failed to bind params"); + res = TDengine.StmtBindParamBatch(stmt, values); + CheckStmtRes(res, "failed to bind params"); - // 4. add batch - res = TDengine.StmtAddBatch(stmt); - CheckStmtRes(res, "failed to add batch"); + // 4. add batch + res = TDengine.StmtAddBatch(stmt); + CheckStmtRes(res, "failed to add batch"); - // 5. execute - res = TDengine.StmtExecute(stmt); - CheckStmtRes(res, "failed to execute"); + // 5. execute + res = TDengine.StmtExecute(stmt); + CheckStmtRes(res, "failed to execute"); + + // 6. free + TaosMultiBind.FreeTaosBind(tags); + TaosMultiBind.FreeTaosBind(values); + } + finally + { + TDengine.Close(conn); + } - // 6. free - TaosMultiBind.FreeTaosBind(tags); - TaosMultiBind.FreeTaosBind(values); - TDengine.Close(conn); - TDengine.Cleanup(); } static IntPtr GetConnection() @@ -60,8 +65,7 @@ namespace TDengineExample var conn = TDengine.Connect(host, username, password, dbname, port); if (conn == IntPtr.Zero) { - Console.WriteLine("Connect to TDengine failed"); - Environment.Exit(0); + throw new Exception("Connect to TDengine failed"); } else { @@ -70,8 +74,6 @@ namespace TDengineExample return conn; } - - static void PrepareSTable() { IntPtr res = TDengine.Query(conn, "CREATE DATABASE power"); @@ -90,9 +92,8 @@ namespace TDengineExample int code = TDengine.StmtClose(stmt); if (code != 0) { - Console.WriteLine($"failed to close stmt, {code} reason: {TDengine.StmtErrorStr(stmt)} "); + throw new Exception($"failed to close stmt, {code} reason: {TDengine.StmtErrorStr(stmt)} "); } - ExitProgram(); } } @@ -100,16 +101,9 @@ namespace TDengineExample { if (TDengine.ErrorNo(res) != 0) { - Console.WriteLine(errorMsg + " since:" + TDengine.Error(res)); - ExitProgram(); + throw new Exception(errorMsg + " since:" + TDengine.Error(res)); } } - static void ExitProgram() - { - TDengine.Close(conn); - TDengine.Cleanup(); - Environment.Exit(1); - } } } diff --git a/docs/examples/csharp/native-example/stmtinsert.csproj b/docs/examples/csharp/stmtInsert/stmtinsert.csproj similarity index 98% rename from docs/examples/csharp/native-example/stmtinsert.csproj rename to docs/examples/csharp/stmtInsert/stmtinsert.csproj index 3d459fbeda02ab03dc40dac2ecae290724cccbcc..f5b2b673971c3822e6f6c9b65b8f02bc9d4dc80e 100644 --- a/docs/examples/csharp/native-example/stmtinsert.csproj +++ b/docs/examples/csharp/stmtInsert/stmtinsert.csproj @@ -9,7 +9,7 @@ - + diff --git a/docs/examples/csharp/subscribe/subscribe.csproj b/docs/examples/csharp/subscribe/subscribe.csproj index 8ae1cf6bc6023558c28797a0d9fcccb2f2e87653..191b3f9e9bb07dc72c9bb452ad19e30e42af922a 100644 --- a/docs/examples/csharp/subscribe/subscribe.csproj +++ b/docs/examples/csharp/subscribe/subscribe.csproj @@ -9,7 +9,7 @@ - + diff --git a/docs/examples/csharp/wsConnect/Program.cs b/docs/examples/csharp/wsConnect/Program.cs new file mode 100644 index 0000000000000000000000000000000000000000..a534bb8a6582be3b8da9ddd73daf39717909b40b --- /dev/null +++ b/docs/examples/csharp/wsConnect/Program.cs @@ -0,0 +1,28 @@ +using System; +using TDengineWS.Impl; + +namespace Examples +{ + public class WSConnExample + { + static int Main(string[] args) + { + string DSN = 
"ws://root:taosdata@127.0.0.1:6041/test"; + IntPtr wsConn = LibTaosWS.WSConnectWithDSN(DSN); + + if (wsConn == IntPtr.Zero) + { + Console.WriteLine("get WS connection failed"); + return -1; + } + else + { + Console.WriteLine("Establish connect success."); + // close connection. + LibTaosWS.WSClose(wsConn); + } + + return 0; + } + } +} diff --git a/docs/examples/csharp/wsConnect/wsConnect.csproj b/docs/examples/csharp/wsConnect/wsConnect.csproj new file mode 100644 index 0000000000000000000000000000000000000000..c7988b6e9c955f21d53a312e10b513403363936e --- /dev/null +++ b/docs/examples/csharp/wsConnect/wsConnect.csproj @@ -0,0 +1,18 @@ + + + + Exe + net6.0 + + + + + + + + + + + + + diff --git a/docs/examples/csharp/wsInsert/Program.cs b/docs/examples/csharp/wsInsert/Program.cs new file mode 100644 index 0000000000000000000000000000000000000000..7fa6af805c5f6fc136c89c903e20636eaaa0a0fc --- /dev/null +++ b/docs/examples/csharp/wsInsert/Program.cs @@ -0,0 +1,61 @@ +using System; +using TDengineWS.Impl; + +namespace Examples +{ + public class WSInsertExample + { + static int Main(string[] args) + { + string DSN = "ws://root:taosdata@127.0.0.1:6041/test"; + IntPtr wsConn = LibTaosWS.WSConnectWithDSN(DSN); + + // Assert if connection is validate + if (wsConn == IntPtr.Zero) + { + Console.WriteLine("get WS connection failed"); + return -1; + } + else + { + Console.WriteLine("Establish connect success."); + } + + string createTable = "CREATE STABLE test.meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);"; + string insert = "INSERT INTO test.d1001 USING test.meters TAGS('California.SanFrancisco', 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)" + + "test.d1002 USING test.meters TAGS('California.SanFrancisco', 3) VALUES('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)" + + "test.d1003 USING test.meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000)('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) " + + "test.d1004 USING test.meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000)('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)"; + + IntPtr wsRes = LibTaosWS.WSQuery(wsConn, createTable); + ValidInsert("create table", wsRes); + LibTaosWS.WSFreeResult(wsRes); + + wsRes = LibTaosWS.WSQuery(wsConn, insert); + ValidInsert("insert data", wsRes); + LibTaosWS.WSFreeResult(wsRes); + + // close connection. + LibTaosWS.WSClose(wsConn); + + return 0; + } + + static void ValidInsert(string desc, IntPtr wsRes) + { + int code = LibTaosWS.WSErrorNo(wsRes); + if (code != 0) + { + Console.WriteLine($"execute SQL failed: reason: {LibTaosWS.WSErrorStr(wsRes)}, code:{code}"); + } + else + { + Console.WriteLine("{0} success affect {2} rows, cost {1} nanoseconds", desc, LibTaosWS.WSTakeTiming(wsRes), LibTaosWS.WSAffectRows(wsRes)); + } + } + } + +} +// Establish connect success. 
+// create table success affect 0 rows, cost 3717542 nanoseconds +// insert data success affect 8 rows, cost 2613637 nanoseconds diff --git a/docs/examples/csharp/wsInsert/wsInsert.csproj b/docs/examples/csharp/wsInsert/wsInsert.csproj new file mode 100644 index 0000000000000000000000000000000000000000..5aa419b2c80bf303e84ff3f1a30a839ce9220663 --- /dev/null +++ b/docs/examples/csharp/wsInsert/wsInsert.csproj @@ -0,0 +1,17 @@ + + + + Exe + net6.0 + enable + + + + + + + + + + + diff --git a/docs/examples/csharp/wsQuery/Program.cs b/docs/examples/csharp/wsQuery/Program.cs new file mode 100644 index 0000000000000000000000000000000000000000..8ee900a05aaef527da83e88340057014a17d9053 --- /dev/null +++ b/docs/examples/csharp/wsQuery/Program.cs @@ -0,0 +1,79 @@ +using System; +using TDengineWS.Impl; +using System.Collections.Generic; +using TDengineDriver; + +namespace Examples +{ + public class WSQueryExample + { + static int Main(string[] args) + { + string DSN = "ws://root:taosdata@127.0.0.1:6041/test"; + IntPtr wsConn = LibTaosWS.WSConnectWithDSN(DSN); + if (wsConn == IntPtr.Zero) + { + Console.WriteLine("get WS connection failed"); + return -1; + } + else + { + Console.WriteLine("Establish connect success."); + } + + string select = "select * from test.meters"; + + // optional:wsRes = LibTaosWS.WSQuery(wsConn, select); + IntPtr wsRes = LibTaosWS.WSQueryTimeout(wsConn, select, 1); + // Assert that the query executed successfully. + int code = LibTaosWS.WSErrorNo(wsRes); + if (code != 0) + { + Console.WriteLine($"execute SQL failed: reason: {LibTaosWS.WSErrorStr(wsRes)}, code:{code}"); + LibTaosWS.WSFreeResult(wsRes); + return -1; + } + + // get meta data + List<TDengineMeta> metas = LibTaosWS.WSGetFields(wsRes); + // get retrieved data + List<Object> dataSet = LibTaosWS.WSGetData(wsRes); + + // do something with result. + foreach (var meta in metas) + { + Console.Write("{0} {1}({2}) \t|\t", meta.name, meta.TypeName(), meta.size); + } + Console.WriteLine(""); + + for (int i = 0; i < dataSet.Count;) + { + for (int j = 0; j < metas.Count; j++) + { + Console.Write("{0}\t|\t", dataSet[i]); + i++; + } + Console.WriteLine(""); + } + + // Free result after use. + LibTaosWS.WSFreeResult(wsRes); + + // close connection. + LibTaosWS.WSClose(wsConn); + + return 0; + } + } +} + +// Establish connect success.
+// ts TIMESTAMP(8) | current FLOAT(4) | voltage INT(4) | phase FLOAT(4) | location BINARY(64) | groupid INT(4) | +// 1538548685000 | 10.8 | 223 | 0.29 | California.LosAngeles | 3 | +// 1538548686500 | 11.5 | 221 | 0.35 | California.LosAngeles | 3 | +// 1538548685500 | 11.8 | 221 | 0.28 | California.LosAngeles | 2 | +// 1538548696600 | 13.4 | 223 | 0.29 | California.LosAngeles | 2 | +// 1538548685000 | 10.3 | 219 | 0.31 | California.SanFrancisco | 2 | +// 1538548695000 | 12.6 | 218 | 0.33 | California.SanFrancisco | 2 | +// 1538548696800 | 12.3 | 221 | 0.31 | California.SanFrancisco | 2 | +// 1538548696650 | 10.3 | 218 | 0.25 | California.SanFrancisco | 3 | diff --git a/docs/examples/csharp/wsQuery/wsQuery.csproj b/docs/examples/csharp/wsQuery/wsQuery.csproj new file mode 100644 index 0000000000000000000000000000000000000000..bcc7c19a59fdab1c8ad9d3098824239d645743e6 --- /dev/null +++ b/docs/examples/csharp/wsQuery/wsQuery.csproj @@ -0,0 +1,19 @@ + + + + Exe + net6.0 + enable + + + + + + + + + + + + + diff --git a/docs/examples/csharp/wsStmt/Program.cs b/docs/examples/csharp/wsStmt/Program.cs new file mode 100644 index 0000000000000000000000000000000000000000..f8673357db6caef676294fa3d505f5bd4c2872c8 --- /dev/null +++ b/docs/examples/csharp/wsStmt/Program.cs @@ -0,0 +1,98 @@ +using System; +using TDengineWS.Impl; +using TDengineDriver; +using System.Runtime.InteropServices; + +namespace Examples +{ + public class WSStmtExample + { + static int Main(string[] args) + { + const string DSN = "ws://root:taosdata@127.0.0.1:6041/test"; + const string table = "meters"; + const string database = "test"; + const string childTable = "d1005"; + string insert = $"insert into ? using {database}.{table} tags(?,?) values(?,?,?,?)"; + const int numOfTags = 2; + const int numOfColumns = 4; + + // Establish connection + IntPtr wsConn = LibTaosWS.WSConnectWithDSN(DSN); + if (wsConn == IntPtr.Zero) + { + Console.WriteLine($"get WS connection failed"); + return -1; + } + else + { + Console.WriteLine("Establish connect success..."); + } + + // init stmt + IntPtr wsStmt = LibTaosWS.WSStmtInit(wsConn); + if (wsStmt != IntPtr.Zero) + { + int code = LibTaosWS.WSStmtPrepare(wsStmt, insert); + ValidStmtStep(code, wsStmt, "WSStmtPrepare"); + + TAOS_MULTI_BIND[] wsTags = new TAOS_MULTI_BIND[] { WSMultiBind.WSBindNchar(new string[] { "California.SanDiego" }), WSMultiBind.WSBindInt(new int?[] { 4 }) }; + code = LibTaosWS.WSStmtSetTbnameTags(wsStmt, $"{database}.{childTable}", wsTags, numOfTags); + ValidStmtStep(code, wsStmt, "WSStmtSetTbnameTags"); + + TAOS_MULTI_BIND[] data = new TAOS_MULTI_BIND[4]; + data[0] = WSMultiBind.WSBindTimestamp(new long[] { 1538548687000, 1538548688000, 1538548689000, 1538548690000, 1538548691000 }); + data[1] = WSMultiBind.WSBindFloat(new float?[] { 10.30F, 10.40F, 10.50F, 10.60F, 10.70F }); + data[2] = WSMultiBind.WSBindInt(new int?[] { 223, 221, 222, 220, 219 }); + data[3] = WSMultiBind.WSBindFloat(new float?[] { 0.31F, 0.32F, 0.33F, 0.35F, 0.28F }); + code = LibTaosWS.WSStmtBindParamBatch(wsStmt, data, numOfColumns); + ValidStmtStep(code, wsStmt, "WSStmtBindParamBatch"); + + code = LibTaosWS.WSStmtAddBatch(wsStmt); + ValidStmtStep(code, wsStmt, "WSStmtAddBatch"); + + IntPtr stmtAffectRowPtr = Marshal.AllocHGlobal(Marshal.SizeOf(typeof(Int32))); + code = LibTaosWS.WSStmtExecute(wsStmt, stmtAffectRowPtr); + ValidStmtStep(code, wsStmt, "WSStmtExecute"); + Console.WriteLine("WS STMT insert {0} rows...", Marshal.ReadInt32(stmtAffectRowPtr)); + Marshal.FreeHGlobal(stmtAffectRowPtr); + + 
LibTaosWS.WSStmtClose(wsStmt); + + // Free unmanaged memory + WSMultiBind.WSFreeTaosBind(wsTags); + WSMultiBind.WSFreeTaosBind(data); + + //check result with SQL "SELECT * FROM test.d1005;" + } + else + { + Console.WriteLine("Init STMT failed..."); + } + + // close connection. + LibTaosWS.WSClose(wsConn); + + return 0; + } + + static void ValidStmtStep(int code, IntPtr wsStmt, string desc) + { + if (code != 0) + { + Console.WriteLine($"{desc} failed,reason: {LibTaosWS.WSErrorStr(wsStmt)}, code: {code}"); + } + else + { + Console.WriteLine("{0} success...", desc); + } + } + } +} + +// WSStmtPrepare success... +// WSStmtSetTbnameTags success... +// WSStmtBindParamBatch success... +// WSStmtAddBatch success... +// WSStmtExecute success... +// WS STMT insert 5 rows... diff --git a/docs/examples/csharp/wsStmt/wsStmt.csproj b/docs/examples/csharp/wsStmt/wsStmt.csproj new file mode 100644 index 0000000000000000000000000000000000000000..bcc7c19a59fdab1c8ad9d3098824239d645743e6 --- /dev/null +++ b/docs/examples/csharp/wsStmt/wsStmt.csproj @@ -0,0 +1,19 @@ + + + + Exe + net6.0 + enable + + + + + + + + + + + + + diff --git a/docs/examples/java/src/main/java/com/taos/example/RestInsertExample.java b/docs/examples/java/src/main/java/com/taos/example/RestInsertExample.java index af97fe4373ca964260e5614f133f359e229b0e15..9d85bf2a94abda71bcdab89d46008b70e52ce437 100644 --- a/docs/examples/java/src/main/java/com/taos/example/RestInsertExample.java +++ b/docs/examples/java/src/main/java/com/taos/example/RestInsertExample.java @@ -16,14 +16,14 @@ public class RestInsertExample { private static List getRawData() { return Arrays.asList( - "d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,California.SanFrancisco,2", - "d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,California.SanFrancisco,2", - "d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,California.SanFrancisco,2", - "d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,California.SanFrancisco,3", - "d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,California.LosAngeles,2", - "d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,California.LosAngeles,2", - "d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,California.LosAngeles,3", - "d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,California.LosAngeles,3" + "d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,'California.SanFrancisco',2", + "d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,'California.SanFrancisco',2", + "d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,'California.SanFrancisco',2", + "d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,'California.SanFrancisco',3", + "d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,'California.LosAngeles',2", + "d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,'California.LosAngeles',2", + "d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,'California.LosAngeles',3", + "d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,'California.LosAngeles',3" ); } diff --git a/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java b/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java index 50e8b357719fc6d1f4707e474afdf58fb4531970..e9af5e9ce0c0473f4513cbb949dcbd9f433c0c92 100644 --- a/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java +++ b/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java @@ -38,12 +38,12 @@ public class SubscribeDemo { statement.executeUpdate("create database " + DB_NAME); statement.executeUpdate("use " + DB_NAME); statement.executeUpdate( - "CREATE TABLE `meters` 
(`ts` TIMESTAMP, `current` FLOAT, `voltage` INT) TAGS (`groupid` INT, `location` BINARY(16))"); - statement.executeUpdate("CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')"); + "CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT) TAGS (`groupid` INT, `location` BINARY(24))"); + statement.executeUpdate("CREATE TABLE `d0` USING `meters` TAGS(0, 'California.LosAngles')"); statement.executeUpdate("INSERT INTO `d0` values(now - 10s, 0.32, 116)"); statement.executeUpdate("INSERT INTO `d0` values(now - 8s, NULL, NULL)"); statement.executeUpdate( - "INSERT INTO `d1` USING `meters` TAGS(1, 'San Francisco') values(now - 9s, 10.1, 119)"); + "INSERT INTO `d1` USING `meters` TAGS(1, 'California.SanFrancisco') values(now - 9s, 10.1, 119)"); statement.executeUpdate( "INSERT INTO `d1` values (now-8s, 10, 120) (now - 6s, 10, 119) (now - 4s, 11.2, 118)"); // create topic @@ -57,7 +57,7 @@ public class SubscribeDemo { properties.setProperty(TMQConstants.ENABLE_AUTO_COMMIT, "true"); properties.setProperty(TMQConstants.GROUP_ID, "test"); properties.setProperty(TMQConstants.VALUE_DESERIALIZER, - "com.taosdata.jdbc.MetersDeserializer"); + "com.taos.example.MetersDeserializer"); // poll data try (TaosConsumer consumer = new TaosConsumer<>(properties)) { @@ -75,4 +75,4 @@ public class SubscribeDemo { } timer.cancel(); } -} \ No newline at end of file +} diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java new file mode 100644 index 0000000000000000000000000000000000000000..04b149a4b96441ecfd1b0bdde54c9ed71349cab2 --- /dev/null +++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java @@ -0,0 +1,63 @@ +package com.taos.example.highvolume; + +import java.sql.*; + +/** + * Prepare target database. + * Count total records in database periodically so that we can estimate the writing speed. 
+ */
+public class DataBaseMonitor {
+ private Connection conn;
+ private Statement stmt;
+
+ public DataBaseMonitor init() throws SQLException {
+ if (conn == null) {
+ String jdbcURL = System.getenv("TDENGINE_JDBC_URL");
+ conn = DriverManager.getConnection(jdbcURL);
+ stmt = conn.createStatement();
+ }
+ return this;
+ }
+
+ public void close() {
+ try {
+ stmt.close();
+ } catch (SQLException e) {
+ }
+ try {
+ conn.close();
+ } catch (SQLException e) {
+ }
+ }
+
+ public void prepareDatabase() throws SQLException {
+ stmt.execute("DROP DATABASE IF EXISTS test");
+ stmt.execute("CREATE DATABASE test");
+ stmt.execute("CREATE STABLE test.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)");
+ }
+
+ public Long count() throws SQLException {
+ if (!stmt.isClosed()) {
+ ResultSet result = stmt.executeQuery("SELECT count(*) from test.meters");
+ result.next();
+ return result.getLong(1);
+ }
+ return null;
+ }
+
+ /**
+ * show test.stables;
+ *
+ * name | created_time | columns | tags | tables |
+ * ============================================================================================
+ * meters | 2022-07-20 08:39:30.902 | 4 | 2 | 620000 |
+ */
+ public Long getTableCount() throws SQLException {
+ if (!stmt.isClosed()) {
+ ResultSet result = stmt.executeQuery("show test.stables");
+ result.next();
+ return result.getLong(5);
+ }
+ return null;
+ }
+}
\ No newline at end of file
diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/FastWriteExample.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/FastWriteExample.java
new file mode 100644
index 0000000000000000000000000000000000000000..41b59551ca69a4056c2f2b572d169bd08dc4fcfe
--- /dev/null
+++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/FastWriteExample.java
@@ -0,0 +1,70 @@
+package com.taos.example.highvolume;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.sql.*;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.BlockingQueue;
+
+
+public class FastWriteExample {
+ final static Logger logger = LoggerFactory.getLogger(FastWriteExample.class);
+
+ final static int taskQueueCapacity = 1000000;
+ final static List<BlockingQueue<String>> taskQueues = new ArrayList<>();
+ final static List<ReadTask> readTasks = new ArrayList<>();
+ final static List<WriteTask> writeTasks = new ArrayList<>();
+ final static DataBaseMonitor databaseMonitor = new DataBaseMonitor();
+
+ public static void stopAll() {
+ logger.info("shutting down");
+ readTasks.forEach(task -> task.stop());
+ writeTasks.forEach(task -> task.stop());
+ databaseMonitor.close();
+ }
+
+ public static void main(String[] args) throws InterruptedException, SQLException {
+ int readTaskCount = args.length > 0 ? Integer.parseInt(args[0]) : 1;
+ int writeTaskCount = args.length > 1 ? Integer.parseInt(args[1]) : 3;
+ int tableCount = args.length > 2 ? Integer.parseInt(args[2]) : 1000;
+ int maxBatchSize = args.length > 3 ? Integer.parseInt(args[3]) : 3000;
+
+ logger.info("readTaskCount={}, writeTaskCount={} tableCount={} maxBatchSize={}",
+ readTaskCount, writeTaskCount, tableCount, maxBatchSize);
+
+ databaseMonitor.init().prepareDatabase();
+
+ // Create task queues, create writing tasks, and start the writing threads.
+ for (int i = 0; i < writeTaskCount; ++i) {
+ BlockingQueue<String> queue = new ArrayBlockingQueue<>(taskQueueCapacity);
+ taskQueues.add(queue);
+ WriteTask task = new WriteTask(queue, maxBatchSize);
+ Thread t = new Thread(task);
+ t.setName("WriteThread-" + i);
+ t.start();
+ }
+
+ // create reading tasks and start reading threads
+ int tableCountPerTask = tableCount / readTaskCount;
+ for (int i = 0; i < readTaskCount; ++i) {
+ ReadTask task = new ReadTask(i, taskQueues, tableCountPerTask);
+ Thread t = new Thread(task);
+ t.setName("ReadThread-" + i);
+ t.start();
+ }
+
+ Runtime.getRuntime().addShutdownHook(new Thread(FastWriteExample::stopAll));
+
+ long lastCount = 0;
+ while (true) {
+ Thread.sleep(10000);
+ long numberOfTable = databaseMonitor.getTableCount();
+ long count = databaseMonitor.count();
+ logger.info("numberOfTable={} count={} speed={}", numberOfTable, count, (count - lastCount) / 10);
+ lastCount = count;
+ }
+ }
+}
\ No newline at end of file
diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/MockDataSource.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/MockDataSource.java
new file mode 100644
index 0000000000000000000000000000000000000000..f0ebc53b4b9a588ac4a23461553dd5c9f1a9f00b
--- /dev/null
+++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/MockDataSource.java
@@ -0,0 +1,53 @@
+package com.taos.example.highvolume;
+
+import java.util.Iterator;
+
+/**
+ * Generate test data
+ */
+class MockDataSource implements Iterator<String> {
+ private String tbNamePrefix;
+ private int tableCount;
+ private long maxRowsPerTable = 1000000000L;
+
+ // 100 milliseconds between two neighbouring rows.
+ long startMs = System.currentTimeMillis() - maxRowsPerTable * 100;
+ private int currentRow = 0;
+ private int currentTbId = -1;
+
+ // mock values
+ String[] location = {"California.LosAngeles", "California.SanDiego", "California.SanJose", "California.Campbell", "California.SanFrancisco"};
+ float[] current = {8.8f, 10.7f, 9.9f, 8.9f, 9.4f};
+ int[] voltage = {119, 116, 111, 113, 118};
+ float[] phase = {0.32f, 0.34f, 0.33f, 0.329f, 0.141f};
+
+ public MockDataSource(String tbNamePrefix, int tableCount) {
+ this.tbNamePrefix = tbNamePrefix;
+ this.tableCount = tableCount;
+ }
+
+ @Override
+ public boolean hasNext() {
+ currentTbId += 1;
+ if (currentTbId == tableCount) {
+ currentTbId = 0;
+ currentRow += 1;
+ }
+ return currentRow < maxRowsPerTable;
+ }
+
+ @Override
+ public String next() {
+ long ts = startMs + 100 * currentRow;
+ int groupId = currentTbId % 5 == 0 ? currentTbId / 5 : currentTbId / 5 + 1;
+ StringBuilder sb = new StringBuilder(tbNamePrefix + "_" + currentTbId + ","); // tbName
+ sb.append(ts).append(','); // ts
+ sb.append(current[currentRow % 5]).append(','); // current
+ sb.append(voltage[currentRow % 5]).append(','); // voltage
+ sb.append(phase[currentRow % 5]).append(','); // phase
+ sb.append(location[currentRow % 5]).append(','); // location
+ sb.append(groupId); // groupID
+
+ return sb.toString();
+ }
+}
diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/ReadTask.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/ReadTask.java
new file mode 100644
index 0000000000000000000000000000000000000000..a6fcfed1d28281d46aff493ef9783972858ebe62
--- /dev/null
+++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/ReadTask.java
@@ -0,0 +1,58 @@
+package com.taos.example.highvolume;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.BlockingQueue;
+
+class ReadTask implements Runnable {
+ private final static Logger logger = LoggerFactory.getLogger(ReadTask.class);
+ private final int taskId;
+ private final List<BlockingQueue<String>> taskQueues;
+ private final int queueCount;
+ private final int tableCount;
+ private boolean active = true;
+
+ public ReadTask(int readTaskId, List<BlockingQueue<String>> queues, int tableCount) {
+ this.taskId = readTaskId;
+ this.taskQueues = queues;
+ this.queueCount = queues.size();
+ this.tableCount = tableCount;
+ }
+
+ /**
+ * Assign received data to different queues.
+ * Here we use the suffix number in the table name.
+ * You are expected to define your own rule in practice.
+ *
+ * @param line record received
+ * @return which queue to use
+ */
+ public int getQueueId(String line) {
+ String tbName = line.substring(0, line.indexOf(',')); // For example: tb1_101
+ String suffixNumber = tbName.split("_")[1];
+ return Integer.parseInt(suffixNumber) % this.queueCount;
+ }
+
+ @Override
+ public void run() {
+ logger.info("started");
+ Iterator<String> it = new MockDataSource("tb" + this.taskId, tableCount);
+ try {
+ while (it.hasNext() && active) {
+ String line = it.next();
+ int queueId = getQueueId(line);
+ taskQueues.get(queueId).put(line);
+ }
+ } catch (Exception e) {
+ logger.error("Read Task Error", e);
+ }
+ }
+
+ public void stop() {
+ logger.info("stop");
+ this.active = false;
+ }
+}
\ No newline at end of file
diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java
new file mode 100644
index 0000000000000000000000000000000000000000..c2989acdbe3d0f56d7451ac86051a55955ce14de
--- /dev/null
+++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java
@@ -0,0 +1,205 @@
+package com.taos.example.highvolume;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.sql.*;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * A helper class encapsulating the logic of writing using SQL.
+ *

+ * The main interfaces are two methods:
+ * <ol>
+ * <li>{@link SQLWriter#processLine}, which receives raw lines from a WriteTask and groups them by table name.</li>
+ * <li>{@link SQLWriter#flush}, which assembles the INSERT statement and executes it.</li>
+ * </ol>
+ * <p>
+ * There is a technique worth mentioning: we create tables on demand when a "table does not exist" error occurs, instead of creating tables automatically using the "INSERT INTO tb USING stb" syntax.
+ * This ensures that checking table existence is a one-time-only operation.
+ *
+ */
+public class SQLWriter {
+ final static Logger logger = LoggerFactory.getLogger(SQLWriter.class);
+
+ private Connection conn;
+ private Statement stmt;
+
+ /**
+ * current number of buffered records
+ */
+ private int bufferedCount = 0;
+ /**
+ * Maximum number of buffered records.
+ * Flush action will be triggered if bufferedCount reaches this value.
+ */
+ private int maxBatchSize;
+
+
+ /**
+ * Maximum SQL length.
+ */
+ private int maxSQLLength;
+
+ /**
+ * Map from table name to column values. For example:
+ * "tb001" -> "(1648432611249,2.1,114,0.09) (1648432611250,2.2,135,0.2)"
+ */
+ private Map<String, String> tbValues = new HashMap<>();
+
+ /**
+ * Map from table name to tag values in the same order as creating stable.
+ * Used for creating table.
+ */
+ private Map<String, String> tbTags = new HashMap<>();
+
+ public SQLWriter(int maxBatchSize) {
+ this.maxBatchSize = maxBatchSize;
+ }
+
+
+ /**
+ * Get Database Connection
+ *
+ * @return Connection
+ * @throws SQLException
+ */
+ private static Connection getConnection() throws SQLException {
+ String jdbcURL = System.getenv("TDENGINE_JDBC_URL");
+ return DriverManager.getConnection(jdbcURL);
+ }
+
+ /**
+ * Create Connection and Statement
+ *
+ * @throws SQLException
+ */
+ public void init() throws SQLException {
+ conn = getConnection();
+ stmt = conn.createStatement();
+ stmt.execute("use test");
+ ResultSet rs = stmt.executeQuery("show variables");
+ while (rs.next()) {
+ String configName = rs.getString(1);
+ if ("maxSQLLength".equals(configName)) {
+ maxSQLLength = Integer.parseInt(rs.getString(2));
+ logger.info("maxSQLLength={}", maxSQLLength);
+ }
+ }
+ }
+
+ /**
+ * Convert raw data to SQL fragments, group them by table name, and cache them in a HashMap.
+ * Trigger writing when the number of buffered records reaches maxBatchSize.
+ *
+ * @param line raw data from the task queue, in format: tbName,ts,current,voltage,phase,location,groupId
+ */
+ public void processLine(String line) throws SQLException {
+ bufferedCount += 1;
+ int firstComma = line.indexOf(',');
+ String tbName = line.substring(0, firstComma);
+ int lastComma = line.lastIndexOf(',');
+ int secondLastComma = line.lastIndexOf(',', lastComma - 1);
+ String value = "(" + line.substring(firstComma + 1, secondLastComma) + ") ";
+ if (tbValues.containsKey(tbName)) {
+ tbValues.put(tbName, tbValues.get(tbName) + value);
+ } else {
+ tbValues.put(tbName, value);
+ }
+ if (!tbTags.containsKey(tbName)) {
+ String location = line.substring(secondLastComma + 1, lastComma);
+ String groupId = line.substring(lastComma + 1);
+ String tagValues = "('" + location + "'," + groupId + ')';
+ tbTags.put(tbName, tagValues);
+ }
+ if (bufferedCount == maxBatchSize) {
+ flush();
+ }
+ }
+
+
+ /**
+ * Assemble the INSERT statement using buffered SQL fragments in Map {@link SQLWriter#tbValues} and execute it.
+ * In case of a "Table does not exist" exception, create all tables in the sql and retry the sql.
+ */
+ public void flush() throws SQLException {
+ StringBuilder sb = new StringBuilder("INSERT INTO ");
+ for (Map.Entry<String, String> entry : tbValues.entrySet()) {
+ String tableName = entry.getKey();
+ String values = entry.getValue();
+ String q = tableName + " values " + values + " ";
+ if (sb.length() + q.length() > maxSQLLength) {
+ executeSQL(sb.toString());
+ logger.warn("increase maxSQLLength or decrease maxBatchSize to gain better performance");
+ sb = new StringBuilder("INSERT INTO ");
+ }
+ sb.append(q);
+ }
+ executeSQL(sb.toString());
+ tbValues.clear();
+ bufferedCount = 0;
+ }
+
+ private void executeSQL(String sql) throws SQLException {
+ try {
+ stmt.executeUpdate(sql);
+ } catch (SQLException e) {
+ // convert to error code defined in taoserror.h
+ int errorCode = e.getErrorCode() & 0xffff;
+ if (errorCode == 0x362 || errorCode == 0x218) {
+ // Table does not exist
+ createTables();
+ executeSQL(sql);
+ } else {
+ logger.error("Execute SQL: {}", sql);
+ throw e;
+ }
+ } catch (Throwable throwable) {
+ logger.error("Execute SQL: {}", sql);
+ throw throwable;
+ }
+ }
+
+ /**
+ * Create tables in batch using syntax:

+ * <pre>
+ * CREATE TABLE [IF NOT EXISTS] tb_name1 USING stb_name TAGS (tag_value1, ...) [IF NOT EXISTS] tb_name2 USING stb_name TAGS (tag_value2, ...) ...;
+ * </pre>

+ */
+ private void createTables() throws SQLException {
+ StringBuilder sb = new StringBuilder("CREATE TABLE ");
+ for (String tbName : tbValues.keySet()) {
+ String tagValues = tbTags.get(tbName);
+ sb.append("IF NOT EXISTS ").append(tbName).append(" USING meters TAGS ").append(tagValues).append(" ");
+ }
+ String sql = sb.toString();
+ try {
+ stmt.executeUpdate(sql);
+ } catch (Throwable throwable) {
+ logger.error("Execute SQL: {}", sql);
+ throw throwable;
+ }
+ }
+
+ public boolean hasBufferedValues() {
+ return bufferedCount > 0;
+ }
+
+ public int getBufferedCount() {
+ return bufferedCount;
+ }
+
+ public void close() {
+ try {
+ stmt.close();
+ } catch (SQLException e) {
+ }
+ try {
+ conn.close();
+ } catch (SQLException e) {
+ }
+ }
+}
\ No newline at end of file
diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/StmtWriter.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/StmtWriter.java
new file mode 100644
index 0000000000000000000000000000000000000000..8ade06625d708a112c85d5657aa00bcd0e605ff4
--- /dev/null
+++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/StmtWriter.java
@@ -0,0 +1,4 @@
+package com.taos.example.highvolume;
+
+public class StmtWriter {
+}
diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/WriteTask.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/WriteTask.java
new file mode 100644
index 0000000000000000000000000000000000000000..de9e5463d7dc59478f991e4783aacaae527b4c4b
--- /dev/null
+++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/WriteTask.java
@@ -0,0 +1,58 @@
+package com.taos.example.highvolume;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.BlockingQueue;
+
+class WriteTask implements Runnable {
+ private final static Logger logger = LoggerFactory.getLogger(WriteTask.class);
+ private final int maxBatchSize;
+
+ // the queue from which this writing task gets raw data.
+ private final BlockingQueue<String> queue;
+
+ // A flag indicating whether to continue.
+ private boolean active = true;
+
+ public WriteTask(BlockingQueue<String> taskQueue, int maxBatchSize) {
+ this.queue = taskQueue;
+ this.maxBatchSize = maxBatchSize;
+ }
+
+ @Override
+ public void run() {
+ logger.info("started");
+ String line = null; // the line most recently taken from the queue.
+ SQLWriter writer = new SQLWriter(maxBatchSize);
+ try {
+ writer.init();
+ while (active) {
+ line = queue.poll();
+ if (line != null) {
+ // parse raw data and buffer the data.
+ writer.processLine(line);
+ } else if (writer.hasBufferedValues()) {
+ // write data immediately if no more data in the queue
+ writer.flush();
+ } else {
+ // sleep a while to avoid high CPU usage if there is no more data in the queue and no buffered records.
+ Thread.sleep(100); + } + } + if (writer.hasBufferedValues()) { + writer.flush(); + } + } catch (Exception e) { + String msg = String.format("line=%s, bufferedCount=%s", line, writer.getBufferedCount()); + logger.error(msg, e); + } finally { + writer.close(); + } + } + + public void stop() { + logger.info("stop"); + this.active = false; + } +} \ No newline at end of file diff --git a/docs/examples/python/conn_native_pandas.py b/docs/examples/python/conn_native_pandas.py index 56942ef57085766cd128b03cabb7a357587eab16..f3bab15efbe6669a88828fb194682dbfedb382df 100644 --- a/docs/examples/python/conn_native_pandas.py +++ b/docs/examples/python/conn_native_pandas.py @@ -1,8 +1,11 @@ import pandas -from sqlalchemy import create_engine +from sqlalchemy import create_engine, text engine = create_engine("taos://root:taosdata@localhost:6030/power") -df = pandas.read_sql("SELECT * FROM meters", engine) +conn = engine.connect() +df = pandas.read_sql(text("SELECT * FROM power.meters"), conn) +conn.close() + # print index print(df.index) diff --git a/docs/examples/python/conn_rest_pandas.py b/docs/examples/python/conn_rest_pandas.py index 0164080cd5a05e72dce40b1d111ea423623ff9b2..1b207d6ff10a353f3473116ce807cc8daf362ca7 100644 --- a/docs/examples/python/conn_rest_pandas.py +++ b/docs/examples/python/conn_rest_pandas.py @@ -1,8 +1,10 @@ import pandas -from sqlalchemy import create_engine +from sqlalchemy import create_engine, text engine = create_engine("taosrest://root:taosdata@localhost:6041") -df: pandas.DataFrame = pandas.read_sql("SELECT * FROM power.meters", engine) +conn = engine.connect() +df: pandas.DataFrame = pandas.read_sql(text("SELECT * FROM power.meters"), conn) +conn.close() # print index print(df.index) diff --git a/docs/examples/python/connect_rest_examples.py b/docs/examples/python/connect_rest_examples.py index 900ec1022ec81ac2db761d918d1ec11c9bb26852..0f8625ae5387a275f7b84948ad80191b8e443862 100644 --- a/docs/examples/python/connect_rest_examples.py +++ b/docs/examples/python/connect_rest_examples.py @@ -1,24 +1,25 @@ # ANCHOR: connect from taosrest import connect, TaosRestConnection, TaosRestCursor -conn: TaosRestConnection = connect(url="http://localhost:6041", - user="root", - password="taosdata", - timeout=30) +conn = connect(url="http://localhost:6041", + user="root", + password="taosdata", + timeout=30) # ANCHOR_END: connect # ANCHOR: basic # create STable -cursor: TaosRestCursor = conn.cursor() +cursor = conn.cursor() cursor.execute("DROP DATABASE IF EXISTS power") cursor.execute("CREATE DATABASE power") -cursor.execute("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)") +cursor.execute( + "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)") # insert data -cursor.execute("""INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) - power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) - power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) - power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) 
('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)""") +cursor.execute("""INSERT INTO power.d1001 USING power.meters TAGS('California.SanFrancisco', 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) + power.d1002 USING power.meters TAGS('California.SanFrancisco', 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) + power.d1003 USING power.meters TAGS('California.LosAngeles', 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) + power.d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)""") print("inserted row count:", cursor.rowcount) # query data @@ -28,7 +29,7 @@ print("queried row count:", cursor.rowcount) # get column names from cursor column_names = [meta[0] for meta in cursor.description] # get rows -data: list[tuple] = cursor.fetchall() +data = cursor.fetchall() print(column_names) for row in data: print(row) diff --git a/docs/examples/python/connection_usage_native_reference.py b/docs/examples/python/connection_usage_native_reference.py index 4803511e427bf4d906fd3a14ff6faf5a000da96c..0a23c5f95b9d0f113e861aae07255c46bb5ae0a5 100644 --- a/docs/examples/python/connection_usage_native_reference.py +++ b/docs/examples/python/connection_usage_native_reference.py @@ -8,7 +8,7 @@ conn.execute("CREATE DATABASE test") # change database. same as execute "USE db" conn.select_db("test") conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (location INT)") -affected_row: int = conn.execute("INSERT INTO t1 USING weather TAGS(1) VALUES (now, 23.5) (now+1m, 23.5) (now+2m 24.4)") +affected_row = conn.execute("INSERT INTO t1 USING weather TAGS(1) VALUES (now, 23.5) (now+1m, 23.5) (now+2m, 24.4)") print("affected_row", affected_row) # output: # affected_row 3 @@ -16,10 +16,10 @@ print("affected_row", affected_row) # ANCHOR: query # Execute a sql and get its result set. It's useful for SELECT statement -result: taos.TaosResult = conn.query("SELECT * from weather") +result = conn.query("SELECT * from weather") # Get fields from result -fields: taos.field.TaosFields = result.fields +fields = result.fields for field in fields: print(field) # {name: ts, type: 9, bytes: 8} @@ -42,4 +42,4 @@ print(data) # ANCHOR_END: query -conn.close() +conn.close() \ No newline at end of file diff --git a/docs/examples/python/fast_write_example.py b/docs/examples/python/fast_write_example.py new file mode 100644 index 0000000000000000000000000000000000000000..626e3310b120b9415952614b4b110ed29f787582 --- /dev/null +++ b/docs/examples/python/fast_write_example.py @@ -0,0 +1,225 @@ +# install dependencies: +# recommend python >= 3.8 +# + +import logging +import math +import multiprocessing +import sys +import time +import os +from multiprocessing import Process, Queue +from mockdatasource import MockDataSource +from queue import Empty +from typing import List + +logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format="%(asctime)s [%(name)s] - %(message)s") + +READ_TASK_COUNT = 1 +WRITE_TASK_COUNT = 1 +TABLE_COUNT = 1000 +QUEUE_SIZE = 1000000 +MAX_BATCH_SIZE = 3000 + +_DONE_MESSAGE = '__DONE__' + + +def get_connection(): + """ + If variable TDENGINE_FIRST_EP is provided then it will be used. If not, firstEP in /etc/taos/taos.cfg will be used. 
+ You can also override the default username and password by supply variable TDENGINE_USER and TDENGINE_PASSWORD + """ + import taos + firstEP = os.environ.get("TDENGINE_FIRST_EP") + if firstEP: + host, port = firstEP.split(":") + else: + host, port = None, 0 + user = os.environ.get("TDENGINE_USER", "root") + password = os.environ.get("TDENGINE_PASSWORD", "taosdata") + return taos.connect(host=host, port=int(port), user=user, password=password) + + +# ANCHOR: read + +def run_read_task(task_id: int, task_queues: List[Queue], infinity): + table_count_per_task = TABLE_COUNT // READ_TASK_COUNT + data_source = MockDataSource(f"tb{task_id}", table_count_per_task, infinity) + try: + for batch in data_source: + if isinstance(batch, tuple): + batch = [batch] + for table_id, rows in batch: + # hash data to different queue + i = table_id % len(task_queues) + # block putting forever when the queue is full + for row in rows: + task_queues[i].put(row) + if not infinity: + for queue in task_queues: + queue.put(_DONE_MESSAGE) + except KeyboardInterrupt: + pass + finally: + logging.info('read task over') + + +# ANCHOR_END: read + + +# ANCHOR: write +def run_write_task(task_id: int, queue: Queue, done_queue: Queue): + from sql_writer import SQLWriter + log = logging.getLogger(f"WriteTask-{task_id}") + writer = SQLWriter(get_connection) + lines = None + try: + while True: + over = False + lines = [] + for _ in range(MAX_BATCH_SIZE): + try: + line = queue.get_nowait() + if line == _DONE_MESSAGE: + over = True + break + if line: + lines.append(line) + except Empty: + time.sleep(0.1) + if len(lines) > 0: + writer.process_lines(lines) + if over: + done_queue.put(_DONE_MESSAGE) + break + except KeyboardInterrupt: + pass + except BaseException as e: + log.debug(f"lines={lines}") + raise e + finally: + writer.close() + log.debug('write task over') + + +# ANCHOR_END: write + +def set_global_config(): + argc = len(sys.argv) + if argc > 1: + global READ_TASK_COUNT + READ_TASK_COUNT = int(sys.argv[1]) + if argc > 2: + global WRITE_TASK_COUNT + WRITE_TASK_COUNT = int(sys.argv[2]) + if argc > 3: + global TABLE_COUNT + TABLE_COUNT = int(sys.argv[3]) + if argc > 4: + global QUEUE_SIZE + QUEUE_SIZE = int(sys.argv[4]) + if argc > 5: + global MAX_BATCH_SIZE + MAX_BATCH_SIZE = int(sys.argv[5]) + + +# ANCHOR: monitor +def run_monitor_process(done_queue: Queue): + log = logging.getLogger("DataBaseMonitor") + conn = None + try: + conn = get_connection() + + def get_count(): + res = conn.query("SELECT count(*) FROM test.meters") + rows = res.fetch_all() + return rows[0][0] if rows else 0 + + last_count = 0 + while True: + try: + done = done_queue.get_nowait() + if done == _DONE_MESSAGE: + break + except Empty: + pass + time.sleep(10) + count = get_count() + log.info(f"count={count} speed={(count - last_count) / 10}") + last_count = count + finally: + conn.close() + + +# ANCHOR_END: monitor +# ANCHOR: main +def main(infinity): + set_global_config() + logging.info(f"READ_TASK_COUNT={READ_TASK_COUNT}, WRITE_TASK_COUNT={WRITE_TASK_COUNT}, " + f"TABLE_COUNT={TABLE_COUNT}, QUEUE_SIZE={QUEUE_SIZE}, MAX_BATCH_SIZE={MAX_BATCH_SIZE}") + + conn = get_connection() + conn.execute("DROP DATABASE IF EXISTS test") + conn.execute("CREATE DATABASE IF NOT EXISTS test") + conn.execute("CREATE STABLE IF NOT EXISTS test.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) " + "TAGS (location BINARY(64), groupId INT)") + conn.close() + + done_queue = Queue() + monitor_process = Process(target=run_monitor_process, args=(done_queue,)) + 
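+ # start the monitor process first so that write speed is reported from the beginning of the run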
monitor_process.start() + logging.debug(f"monitor task started with pid {monitor_process.pid}") + + task_queues: List[Queue] = [] + write_processes = [] + read_processes = [] + + # create task queues + for i in range(WRITE_TASK_COUNT): + queue = Queue() + task_queues.append(queue) + + # create write processes + for i in range(WRITE_TASK_COUNT): + p = Process(target=run_write_task, args=(i, task_queues[i], done_queue)) + p.start() + logging.debug(f"WriteTask-{i} started with pid {p.pid}") + write_processes.append(p) + + # create read processes + for i in range(READ_TASK_COUNT): + queues = assign_queues(i, task_queues) + p = Process(target=run_read_task, args=(i, queues, infinity)) + p.start() + logging.debug(f"ReadTask-{i} started with pid {p.pid}") + read_processes.append(p) + + try: + monitor_process.join() + for p in read_processes: + p.join() + for p in write_processes: + p.join() + time.sleep(1) + return + except KeyboardInterrupt: + monitor_process.terminate() + [p.terminate() for p in read_processes] + [p.terminate() for p in write_processes] + [q.close() for q in task_queues] + + +def assign_queues(read_task_id, task_queues): + """ + Compute target queues for a specific read task. + """ + ratio = WRITE_TASK_COUNT / READ_TASK_COUNT + from_index = math.floor(read_task_id * ratio) + end_index = math.ceil((read_task_id + 1) * ratio) + return task_queues[from_index:end_index] + + +if __name__ == '__main__': + multiprocessing.set_start_method('spawn') + main(False) +# ANCHOR_END: main diff --git a/docs/examples/python/kafka_example_common.py b/docs/examples/python/kafka_example_common.py new file mode 100644 index 0000000000000000000000000000000000000000..566748c94e2542aabe8265ed55c85e4b725d69bb --- /dev/null +++ b/docs/examples/python/kafka_example_common.py @@ -0,0 +1,65 @@ +#! 
encoding = utf-8 +import taos + +LOCATIONS = ['California.SanFrancisco', 'California.LosAngles', 'California.SanDiego', 'California.SanJose', + 'California.PaloAlto', 'California.Campbell', 'California.MountainView', 'California.Sunnyvale', + 'California.SantaClara', 'California.Cupertino'] + +CREATE_DATABASE_SQL = 'create database if not exists {} keep 365 duration 10 buffer 16 wal_level 1' +USE_DATABASE_SQL = 'use {}' +DROP_TABLE_SQL = 'drop table if exists meters' +DROP_DATABASE_SQL = 'drop database if exists {}' +CREATE_STABLE_SQL = 'create stable meters (ts timestamp, current float, voltage int, phase float) tags ' \ + '(location binary(64), groupId int)' +CREATE_TABLE_SQL = 'create table if not exists {} using meters tags (\'{}\', {})' + + +def create_database_and_tables(host, port, user, password, db, table_count): + tags_tables = _init_tags_table_names(table_count=table_count) + conn = taos.connect(host=host, port=port, user=user, password=password) + + conn.execute(DROP_DATABASE_SQL.format(db)) + conn.execute(CREATE_DATABASE_SQL.format(db)) + conn.execute(USE_DATABASE_SQL.format(db)) + conn.execute(DROP_TABLE_SQL) + conn.execute(CREATE_STABLE_SQL) + for tags in tags_tables: + location, group_id = _get_location_and_group(tags) + tables = tags_tables[tags] + for table_name in tables: + conn.execute(CREATE_TABLE_SQL.format(table_name, location, group_id)) + conn.close() + + +def clean(host, port, user, password, db): + conn = taos.connect(host=host, port=port, user=user, password=password) + conn.execute(DROP_DATABASE_SQL.format(db)) + conn.close() + + +def _init_tags_table_names(table_count): + tags_table_names = {} + group_id = 0 + for i in range(table_count): + table_name = 'd{}'.format(i) + location_idx = i % len(LOCATIONS) + location = LOCATIONS[location_idx] + if location_idx == 0: + group_id += 1 + if group_id > 10: + group_id -= 10 + key = _tag_table_mapping_key(location=location, group_id=group_id) + if key not in tags_table_names: + tags_table_names[key] = [] + tags_table_names[key].append(table_name) + + return tags_table_names + + +def _tag_table_mapping_key(location, group_id): + return '{}_{}'.format(location, group_id) + + +def _get_location_and_group(key): + fields = key.split('_') + return fields[0], fields[1] diff --git a/docs/examples/python/kafka_example_consumer.py b/docs/examples/python/kafka_example_consumer.py new file mode 100644 index 0000000000000000000000000000000000000000..e2d5cf535b3953a3c0ecec9e25cc615948162633 --- /dev/null +++ b/docs/examples/python/kafka_example_consumer.py @@ -0,0 +1,231 @@ +#! 
encoding = utf-8 +import json +import logging +import time +from concurrent.futures import ThreadPoolExecutor, Future +from json import JSONDecodeError +from typing import Callable + +import taos +from kafka import KafkaConsumer +from kafka.consumer.fetcher import ConsumerRecord + +import kafka_example_common as common + + +class Consumer(object): + DEFAULT_CONFIGS = { + 'kafka_brokers': 'localhost:9092', # kafka broker + 'kafka_topic': 'tdengine_kafka_practices', + 'kafka_group_id': 'taos', + 'taos_host': 'localhost', # TDengine host + 'taos_port': 6030, # TDengine port + 'taos_user': 'root', # TDengine user name + 'taos_password': 'taosdata', # TDengine password + 'taos_database': 'power', # TDengine database + 'message_type': 'json', # message format, 'json' or 'line' + 'clean_after_testing': False, # if drop database after testing + 'max_poll': 1000, # poll size for batch mode + 'workers': 10, # thread count for multi-threading + 'testing': False + } + + INSERT_SQL_HEADER = "insert into " + INSERT_PART_SQL = '{} values (\'{}\', {}, {}, {})' + + def __init__(self, **configs): + self.config = self.DEFAULT_CONFIGS + self.config.update(configs) + + self.consumer = None + if not self.config.get('testing'): + self.consumer = KafkaConsumer( + self.config.get('kafka_topic'), + bootstrap_servers=self.config.get('kafka_brokers'), + group_id=self.config.get('kafka_group_id'), + ) + + self.conns = taos.connect( + host=self.config.get('taos_host'), + port=self.config.get('taos_port'), + user=self.config.get('taos_user'), + password=self.config.get('taos_password'), + db=self.config.get('taos_database'), + ) + if self.config.get('workers') > 1: + self.pool = ThreadPoolExecutor(max_workers=self.config.get('workers')) + self.tasks = [] + # tags and table mapping # key: {location}_{groupId} value: + + def consume(self): + """ + + consume data from kafka and deal. Base on `message_type`, `bath_consume`, `insert_by_table`, + there are several deal function. + :return: + """ + self.conns.execute(common.USE_DATABASE_SQL.format(self.config.get('taos_database'))) + try: + if self.config.get('message_type') == 'line': # line + self._run(self._line_to_taos) + if self.config.get('message_type') == 'json': # json + self._run(self._json_to_taos) + except KeyboardInterrupt: + logging.warning("## caught keyboard interrupt, stopping") + finally: + self.stop() + + def stop(self): + """ + + stop consuming + :return: + """ + # close consumer + if self.consumer is not None: + self.consumer.commit() + self.consumer.close() + + # multi thread + if self.config.get('workers') > 1: + if self.pool is not None: + self.pool.shutdown() + for task in self.tasks: + while not task.done(): + time.sleep(0.01) + + # clean data + if self.config.get('clean_after_testing'): + self.conns.execute(common.DROP_TABLE_SQL) + self.conns.execute(common.DROP_DATABASE_SQL.format(self.config.get('taos_database'))) + # close taos + if self.conns is not None: + self.conns.close() + + def _run(self, f): + """ + + run in batch consuming mode + :param f: + :return: + """ + i = 0 # just for test. + while True: + messages = self.consumer.poll(timeout_ms=100, max_records=self.config.get('max_poll')) + if messages: + if self.config.get('workers') > 1: + self.pool.submit(f, messages.values()) + else: + f(list(messages.values())) + if not messages: + i += 1 # just for test. + time.sleep(0.1) + if i > 3: # just for test. + logging.warning('## test over.') # just for test. + return # just for test. 
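+ # NOTE: each deal function below receives the values of one poll() result, i.e. a
+ # list of per-partition ConsumerRecord lists, and converts the whole batch into a
+ # single INSERT statement.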
+ + def _json_to_taos(self, messages): + """ + + convert a batch of json data to sql, and insert into TDengine + :param messages: + :return: + """ + sql = self._build_sql_from_json(messages=messages) + self.conns.execute(sql=sql) + + def _line_to_taos(self, messages): + """ + + convert a batch of lines data to sql, and insert into TDengine + :param messages: + :return: + """ + lines = [] + for partition_messages in messages: + for message in partition_messages: + lines.append(message.value.decode()) + sql = self.INSERT_SQL_HEADER + ' '.join(lines) + self.conns.execute(sql=sql) + + def _build_single_sql_from_json(self, msg_value): + try: + data = json.loads(msg_value) + except JSONDecodeError as e: + logging.error('## decode message [%s] error ', msg_value, e) + return '' + # location = data.get('location') + # group_id = data.get('groupId') + ts = data.get('ts') + current = data.get('current') + voltage = data.get('voltage') + phase = data.get('phase') + table_name = data.get('table_name') + + return self.INSERT_PART_SQL.format(table_name, ts, current, voltage, phase) + + def _build_sql_from_json(self, messages): + sql_list = [] + for partition_messages in messages: + for message in partition_messages: + sql_list.append(self._build_single_sql_from_json(message.value)) + return self.INSERT_SQL_HEADER + ' '.join(sql_list) + + +def test_json_to_taos(consumer: Consumer): + records = [ + [ + ConsumerRecord(checksum=None, headers=None, offset=1, key=None, + value=json.dumps({'table_name': 'd0', + 'ts': '2022-12-06 15:13:38.643', + 'current': 3.41, + 'voltage': 105, + 'phase': 0.02027, }), + partition=1, topic='test', serialized_key_size=None, serialized_header_size=None, + serialized_value_size=None, timestamp=time.time(), timestamp_type=None), + ConsumerRecord(checksum=None, headers=None, offset=1, key=None, + value=json.dumps({'table_name': 'd1', + 'ts': '2022-12-06 15:13:39.643', + 'current': 3.41, + 'voltage': 102, + 'phase': 0.02027, }), + partition=1, topic='test', serialized_key_size=None, serialized_header_size=None, + serialized_value_size=None, timestamp=time.time(), timestamp_type=None), + ] + ] + + consumer._json_to_taos(messages=records) + + +def test_line_to_taos(consumer: Consumer): + records = [ + [ + ConsumerRecord(checksum=None, headers=None, offset=1, key=None, + value="d0 values('2023-01-01 00:00:00.001', 3.49, 109, 0.02737)".encode('utf-8'), + partition=1, topic='test', serialized_key_size=None, serialized_header_size=None, + serialized_value_size=None, timestamp=time.time(), timestamp_type=None), + ConsumerRecord(checksum=None, headers=None, offset=1, key=None, + value="d1 values('2023-01-01 00:00:00.002', 6.19, 112, 0.09171)".encode('utf-8'), + partition=1, topic='test', serialized_key_size=None, serialized_header_size=None, + serialized_value_size=None, timestamp=time.time(), timestamp_type=None), + ] + ] + consumer._line_to_taos(messages=records) + + +def consume(kafka_brokers, kafka_topic, kafka_group_id, taos_host, taos_port, taos_user, + taos_password, taos_database, message_type, max_poll, workers): + c = Consumer(kafka_brokers=kafka_brokers, kafka_topic=kafka_topic, kafka_group_id=kafka_group_id, + taos_host=taos_host, taos_port=taos_port, taos_user=taos_user, taos_password=taos_password, + taos_database=taos_database, message_type=message_type, max_poll=max_poll, workers=workers) + c.consume() + + +if __name__ == '__main__': + consumer = Consumer(testing=True) + common.create_database_and_tables(host='localhost', port=6030, user='root', password='taosdata', 
db='py_kafka_test', + table_count=10) + consumer.conns.execute(common.USE_DATABASE_SQL.format('py_kafka_test')) + test_json_to_taos(consumer) + test_line_to_taos(consumer) + common.clean(host='localhost', port=6030, user='root', password='taosdata', db='py_kafka_test') diff --git a/docs/examples/python/kafka_example_perform.py b/docs/examples/python/kafka_example_perform.py new file mode 100644 index 0000000000000000000000000000000000000000..23ae4b48c8fc8139b85cd41b041953e8f55f12b4 --- /dev/null +++ b/docs/examples/python/kafka_example_perform.py @@ -0,0 +1,103 @@ +#! encoding=utf-8 + +import argparse +import logging +import multiprocessing +import time +from multiprocessing import pool + +import kafka_example_common as common +import kafka_example_consumer as consumer +import kafka_example_producer as producer + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('-kafka-broker', type=str, default='localhost:9092', + help='kafka borker host. default is `localhost:9200`') + parser.add_argument('-kafka-topic', type=str, default='tdengine-kafka-practices', + help='kafka topic. default is `tdengine-kafka-practices`') + parser.add_argument('-kafka-group', type=str, default='kafka_practices', + help='kafka consumer group. default is `kafka_practices`') + parser.add_argument('-taos-host', type=str, default='localhost', + help='TDengine host. default is `localhost`') + parser.add_argument('-taos-port', type=int, default=6030, help='TDengine port. default is 6030') + parser.add_argument('-taos-user', type=str, default='root', help='TDengine username, default is `root`') + parser.add_argument('-taos-password', type=str, default='taosdata', help='TDengine password, default is `taosdata`') + parser.add_argument('-taos-db', type=str, default='tdengine_kafka_practices', + help='TDengine db name, default is `tdengine_kafka_practices`') + parser.add_argument('-table-count', type=int, default=100, help='TDengine sub-table count, default is 100') + parser.add_argument('-table-items', type=int, default=1000, help='items in per sub-tables, default is 1000') + parser.add_argument('-message-type', type=str, default='line', + help='kafka message type. `line` or `json`. default is `line`') + parser.add_argument('-max-poll', type=int, default=1000, help='max poll for kafka consumer') + parser.add_argument('-threads', type=int, default=10, help='thread count for deal message') + parser.add_argument('-processes', type=int, default=1, help='process count') + + args = parser.parse_args() + total = args.table_count * args.table_items + + logging.warning("## start to prepare testing data...") + prepare_data_start = time.time() + producer.produce_total(100, args.kafka_broker, args.kafka_topic, args.message_type, total, args.table_count) + prepare_data_end = time.time() + logging.warning("## prepare testing data finished! spend-[%s]", prepare_data_end - prepare_data_start) + + logging.warning("## start to create database and tables ...") + create_db_start = time.time() + # create database and table + common.create_database_and_tables(host=args.taos_host, port=args.taos_port, user=args.taos_user, + password=args.taos_password, db=args.taos_db, table_count=args.table_count) + create_db_end = time.time() + logging.warning("## create database and tables finished! 
spend [%s]", create_db_end - create_db_start) + + processes = args.processes + + logging.warning("## start to consume data and insert into TDengine...") + consume_start = time.time() + if processes > 1: # multiprocess + multiprocessing.set_start_method("spawn") + pool = pool.Pool(processes) + + consume_start = time.time() + for _ in range(processes): + pool.apply_async(func=consumer.consume, args=( + args.kafka_broker, args.kafka_topic, args.kafka_group, args.taos_host, args.taos_port, args.taos_user, + args.taos_password, args.taos_db, args.message_type, args.max_poll, args.threads)) + pool.close() + pool.join() + else: + consume_start = time.time() + consumer.consume(kafka_brokers=args.kafka_broker, kafka_topic=args.kafka_topic, kafka_group_id=args.kafka_group, + taos_host=args.taos_host, taos_port=args.taos_port, taos_user=args.taos_user, + taos_password=args.taos_password, taos_database=args.taos_db, message_type=args.message_type, + max_poll=args.max_poll, workers=args.threads) + consume_end = time.time() + logging.warning("## consume data and insert into TDengine over! spend-[%s]", consume_end - consume_start) + + # print report + logging.warning( + "\n#######################\n" + " Prepare data \n" + "#######################\n" + "# data_type # %s \n" + "# total # %s \n" + "# spend # %s s\n" + "#######################\n" + " Create database \n" + "#######################\n" + "# stable # 1 \n" + "# sub-table # 100 \n" + "# spend # %s s \n" + "#######################\n" + " Consume \n" + "#######################\n" + "# data_type # %s \n" + "# threads # %s \n" + "# processes # %s \n" + "# total_count # %s \n" + "# spend # %s s\n" + "# per_second # %s \n" + "#######################\n", + args.message_type, total, prepare_data_end - prepare_data_start, create_db_end - create_db_start, + args.message_type, args.threads, processes, total, consume_end - consume_start, + total / (consume_end - consume_start)) diff --git a/docs/examples/python/kafka_example_producer.py b/docs/examples/python/kafka_example_producer.py new file mode 100644 index 0000000000000000000000000000000000000000..51468c7e37ab3400bece69fa58e126a789ef9860 --- /dev/null +++ b/docs/examples/python/kafka_example_producer.py @@ -0,0 +1,97 @@ +#! 
encoding = utf-8 +import json +import random +import threading +from concurrent.futures import ThreadPoolExecutor, Future +from datetime import datetime + +from kafka import KafkaProducer + +locations = ['California.SanFrancisco', 'California.LosAngles', 'California.SanDiego', 'California.SanJose', + 'California.PaloAlto', 'California.Campbell', 'California.MountainView', 'California.Sunnyvale', + 'California.SantaClara', 'California.Cupertino'] + +producers: list[KafkaProducer] = [] + +lock = threading.Lock() +start = 1640966400 + + +def produce_total(workers, broker, topic, message_type, total, table_count): + if len(producers) == 0: + lock.acquire() + if len(producers) == 0: + _init_kafka_producers(broker=broker, count=10) + lock.release() + pool = ThreadPoolExecutor(max_workers=workers) + futures = [] + for _ in range(0, workers): + futures.append(pool.submit(_produce_total, topic, message_type, int(total / workers), table_count)) + pool.shutdown() + for f in futures: + f.result() + _close_kafka_producers() + + +def _produce_total(topic, message_type, total, table_count): + producer = _get_kafka_producer() + for _ in range(total): + message = _get_fake_date(message_type=message_type, table_count=table_count) + producer.send(topic=topic, value=message.encode(encoding='utf-8')) + + +def _init_kafka_producers(broker, count): + for _ in range(count): + p = KafkaProducer(bootstrap_servers=broker, batch_size=64 * 1024, linger_ms=300, acks=0) + producers.append(p) + + +def _close_kafka_producers(): + for p in producers: + p.close() + + +def _get_kafka_producer(): + return producers[random.randint(0, len(producers) - 1)] + + +def _get_fake_date(table_count, message_type='json'): + if message_type == 'json': + return _get_json_message(table_count=table_count) + if message_type == 'line': + return _get_line_message(table_count=table_count) + return '' + + +def _get_json_message(table_count): + return json.dumps({ + 'ts': _get_timestamp(), + 'current': random.randint(0, 1000) / 100, + 'voltage': random.randint(105, 115), + 'phase': random.randint(0, 32000) / 100000, + 'location': random.choice(locations), + 'groupId': random.randint(1, 10), + 'table_name': _random_table_name(table_count) + }) + + +def _get_line_message(table_count): + return "{} values('{}', {}, {}, {})".format( + _random_table_name(table_count), # table + _get_timestamp(), # ts + random.randint(0, 1000) / 100, # current + random.randint(105, 115), # voltage + random.randint(0, 32000) / 100000, # phase + ) + + +def _random_table_name(table_count): + return 'd{}'.format(random.randint(0, table_count - 1)) + + +def _get_timestamp(): + global start + lock.acquire(blocking=True) + start += 0.001 + lock.release() + return datetime.fromtimestamp(start).strftime('%Y-%m-%d %H:%M:%S.%f')[:-3] diff --git a/docs/examples/python/mockdatasource.py b/docs/examples/python/mockdatasource.py new file mode 100644 index 0000000000000000000000000000000000000000..9c702936ea6f1bdff3f604d376fd1925b4dc118e --- /dev/null +++ b/docs/examples/python/mockdatasource.py @@ -0,0 +1,61 @@ +import time + + +class MockDataSource: + samples = [ + "8.8,119,0.32,California.LosAngeles,0", + "10.7,116,0.34,California.SanDiego,1", + "9.9,111,0.33,California.SanJose,2", + "8.9,113,0.329,California.Campbell,3", + "9.4,118,0.141,California.SanFrancisco,4" + ] + + def __init__(self, tb_name_prefix, table_count, infinity=True): + self.table_name_prefix = tb_name_prefix + "_" + self.table_count = table_count + self.max_rows = 10000000 + self.current_ts = round(time.time() * 
1000) - self.max_rows * 100 + # [(tableId, tableName, values),] + self.data = self._init_data() + self.infinity = infinity + + def _init_data(self): + lines = self.samples * (self.table_count // 5 + 1) + data = [] + for i in range(self.table_count): + table_name = self.table_name_prefix + str(i) + data.append((i, table_name, lines[i])) # tableId, row + return data + + def __iter__(self): + self.row = 0 + if not self.infinity: + return iter(self._iter_data()) + else: + return self + + def __next__(self): + """ + next 1000 rows for each table. + return: {tableId:[row,...]} + """ + return self._iter_data() + + def _iter_data(self): + ts = [] + for _ in range(1000): + self.current_ts += 100 + ts.append(str(self.current_ts)) + # add timestamp to each row + # [(tableId, ["tableName,ts,current,voltage,phase,location,groupId"])] + result = [] + for table_id, table_name, values in self.data: + rows = [table_name + ',' + t + ',' + values for t in ts] + result.append((table_id, rows)) + return result + + +if __name__ == '__main__': + datasource = MockDataSource('t', 10, False) + for data in datasource: + print(data) diff --git a/docs/examples/python/native_insert_example.py b/docs/examples/python/native_insert_example.py index 94fd00a6e9d1dcd2119693c4b5c862d36c219a3d..cdde7d23d24d12e11c67b6c6acc0e0b089fb5335 100644 --- a/docs/examples/python/native_insert_example.py +++ b/docs/examples/python/native_insert_example.py @@ -25,10 +25,10 @@ def create_stable(conn: taos.TaosConnection): # The generated SQL is: -# INSERT INTO d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) -# d1002 USING meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) -# d1003 USING meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) -# d1004 USING meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000) +# INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) +# d1002 USING meters TAGS('California.SanFrancisco', 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) +# d1003 USING meters TAGS('California.LosAngeles', 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) +# d1004 USING meters TAGS('California.LosAngeles', 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000) def get_sql(): global lines diff --git a/docs/examples/python/sql_writer.py b/docs/examples/python/sql_writer.py new file mode 100644 index 0000000000000000000000000000000000000000..3456981a7b9a174e38f8795ff7251ab3c675174b --- /dev/null +++ b/docs/examples/python/sql_writer.py @@ -0,0 +1,111 @@ +import logging +import taos + + +class SQLWriter: + log = logging.getLogger("SQLWriter") + + def __init__(self, get_connection_func): + self._tb_values = {} + self._tb_tags = {} + self._conn = get_connection_func() + self._max_sql_length = self.get_max_sql_length() + self._conn.execute("create database if not exists test") + self._conn.execute("USE test") + + def 
get_max_sql_length(self): + rows = self._conn.query("SHOW variables").fetch_all() + for r in rows: + name = r[0] + if name == "maxSQLLength": + return int(r[1]) + return 1024 * 1024 + + def process_lines(self, lines: [str]): + """ + :param lines: [[tbName,ts,current,voltage,phase,location,groupId]] + """ + for line in lines: + ps = line.split(",") + table_name = ps[0] + value = '(' + ",".join(ps[1:-2]) + ') ' + if table_name in self._tb_values: + self._tb_values[table_name] += value + else: + self._tb_values[table_name] = value + + if table_name not in self._tb_tags: + location = ps[-2] + group_id = ps[-1] + tag_value = f"('{location}',{group_id})" + self._tb_tags[table_name] = tag_value + self.flush() + + def flush(self): + """ + Assemble INSERT statement and execute it. + When the sql length grows close to MAX_SQL_LENGTH, the sql will be executed immediately, and a new INSERT statement will be created. + In case of "Table does not exit" exception, tables in the sql will be created and the sql will be re-executed. + """ + sql = "INSERT INTO " + sql_len = len(sql) + buf = [] + for tb_name, values in self._tb_values.items(): + q = tb_name + " VALUES " + values + if sql_len + len(q) >= self._max_sql_length: + sql += " ".join(buf) + self.execute_sql(sql) + sql = "INSERT INTO " + sql_len = len(sql) + buf = [] + buf.append(q) + sql_len += len(q) + sql += " ".join(buf) + self.create_tables() + self.execute_sql(sql) + self._tb_values.clear() + + def execute_sql(self, sql): + try: + self._conn.execute(sql) + except taos.Error as e: + error_code = e.errno & 0xffff + # Table does not exit + if error_code == 9731: + self.create_tables() + else: + self.log.error("Execute SQL: %s", sql) + raise e + except BaseException as baseException: + self.log.error("Execute SQL: %s", sql) + raise baseException + + def create_tables(self): + sql = "CREATE TABLE " + for tb in self._tb_values.keys(): + tag_values = self._tb_tags[tb] + sql += "IF NOT EXISTS " + tb + " USING meters TAGS " + tag_values + " " + try: + self._conn.execute(sql) + except BaseException as e: + self.log.error("Execute SQL: %s", sql) + raise e + + def close(self): + if self._conn: + self._conn.close() + + +if __name__ == '__main__': + def get_connection_func(): + conn = taos.connect() + return conn + + + writer = SQLWriter(get_connection_func=get_connection_func) + writer.execute_sql( + "create stable if not exists meters (ts timestamp, current float, voltage int, phase float) " + "tags (location binary(64), groupId int)") + writer.execute_sql( + "INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) " + "VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32)") diff --git a/docs/examples/python/tmq_example.py b/docs/examples/python/tmq_example.py index a4625ca11accfbf7d263f4c1993f712987a136cb..6f7fb87c89ce4cb96793d09a837f60ad54ae69bc 100644 --- a/docs/examples/python/tmq_example.py +++ b/docs/examples/python/tmq_example.py @@ -1,58 +1,55 @@ +from taos.tmq import Consumer import taos -from taos.tmq import * -conn = taos.connect() -print("init") -conn.execute("drop topic if exists topic_ctb_column") -conn.execute("drop database if exists py_tmq") -conn.execute("create database if not exists py_tmq vgroups 2") -conn.select_db("py_tmq") -conn.execute( - "create stable if not exists stb1 (ts timestamp, c1 int, c2 float, c3 binary(10)) tags(t1 int)" -) -conn.execute("create table if not exists tb1 using stb1 tags(1)") -conn.execute("create table if not exists tb2 using stb1 tags(2)") -conn.execute("create table if not exists tb3 using stb1 
tags(3)") - -print("create topic") -conn.execute( - "create topic if not exists topic_ctb_column as select ts, c1, c2, c3 from stb1" -) - -print("build consumer") -conf = TaosTmqConf() -conf.set("group.id", "tg2") -conf.set("td.connect.user", "root") -conf.set("td.connect.pass", "taosdata") -conf.set("enable.auto.commit", "true") - - -def tmq_commit_cb_print(tmq, resp, offset, param=None): - print(f"commit: {resp}, tmq: {tmq}, offset: {offset}, param: {param}") - - -conf.set_auto_commit_cb(tmq_commit_cb_print, None) -tmq = conf.new_consumer() - -print("build topic list") - -topic_list = TaosTmqList() -topic_list.append("topic_ctb_column") - -print("basic consume loop") -tmq.subscribe(topic_list) - -sub_list = tmq.subscription() - -print("subscribed topics: ", sub_list) - -while 1: - res = tmq.poll(1000) - if res: - topic = res.get_topic_name() - vg = res.get_vgroup_id() - db = res.get_db_name() - print(f"topic: {topic}\nvgroup id: {vg}\ndb: {db}") - for row in res: - print(row) +def init_tmq_env(db, topic): + conn = taos.connect() + conn.execute("drop topic if exists {}".format(topic)) + conn.execute("drop database if exists {}".format(db)) + conn.execute("create database if not exists {}".format(db)) + conn.select_db(db) + conn.execute( + "create stable if not exists stb1 (ts timestamp, c1 int, c2 float, c3 varchar(16)) tags(t1 int, t3 varchar(16))") + conn.execute("create table if not exists tb1 using stb1 tags(1, 't1')") + conn.execute("create table if not exists tb2 using stb1 tags(2, 't2')") + conn.execute("create table if not exists tb3 using stb1 tags(3, 't3')") + conn.execute("create topic if not exists {} as select ts, c1, c2, c3 from stb1".format(topic)) + conn.execute("insert into tb1 values (now, 1, 1.0, 'tmq test')") + conn.execute("insert into tb2 values (now, 2, 2.0, 'tmq test')") + conn.execute("insert into tb3 values (now, 3, 3.0, 'tmq test')") + + +def cleanup(db, topic): + conn = taos.connect() + conn.execute("drop topic if exists {}".format(topic)) + conn.execute("drop database if exists {}".format(db)) + + +if __name__ == '__main__': + init_tmq_env("tmq_test", "tmq_test_topic") # init env + consumer = Consumer( + { + "group.id": "tg2", + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "enable.auto.commit": "true", + } + ) + consumer.subscribe(["tmq_test_topic"]) + + try: + while True: + res = consumer.poll(1) + if not res: + break + err = res.error() + if err is not None: + raise err + val = res.value() + + for block in val: + print(block.fetchall()) + finally: + consumer.unsubscribe() + consumer.close() + cleanup("tmq_test", "tmq_test_topic") diff --git a/docs/examples/python/tmq_websocket_example.py b/docs/examples/python/tmq_websocket_example.py new file mode 100644 index 0000000000000000000000000000000000000000..e1dcb0086a995c0c20a5d079ed6d8f4d18ea0356 --- /dev/null +++ b/docs/examples/python/tmq_websocket_example.py @@ -0,0 +1,31 @@ +#!/usr/bin/python3 +from taosws import Consumer + +conf = { + "td.connect.websocket.scheme": "ws", + "group.id": "0", +} +consumer = Consumer(conf) + +consumer.subscribe(["test"]) + +while True: + message = consumer.poll(timeout=1.0) + if message: + id = message.vgroup() + topic = message.topic() + database = message.database() + + for block in message: + nrows = block.nrows() + ncols = block.ncols() + for row in block: + print(row) + values = block.fetchall() + print(nrows, ncols) + + # consumer.commit(message) + else: + break + +consumer.close() diff --git a/docs/zh/01-index.md b/docs/zh/01-index.md index 
d2e6706892f3997af115e71d1da455ebce2ecbec..a406fe663c56a1b1641b52fdca09c9e869408476 100644
--- a/docs/zh/01-index.md
+++ b/docs/zh/01-index.md
@@ -1,25 +1,35 @@
 ---
-title: TDengine 文档
-sidebar_label: 文档首页
+title: TDengine Cloud 文档
+sidebar_label: 主页
 slug: /
 ---
 
-TDengine 是一款[高性能](https://www.taosdata.com/fast)、[分布式](https://www.taosdata.com/scalable)、[支持 SQL](https://www.taosdata.com/sql-support) 的时序数据库 (Database)。本文档是 TDengine 用户手册,主要是介绍 TDengine 的基本概念、安装、使用、功能、开发接口、运营维护、TDengine 内核设计等等,它主要是面向架构师、开发者与系统管理员的。
+TDengine Cloud 是全托管的时序数据处理云服务平台。它是基于开源的时序数据库 TDengine 而开发的。除高性能的时序数据库之外,它还具有缓存、订阅和流计算等系统功能,而且提供了便利而又安全的数据分享、以及众多的企业级服务功能。它可以让物联网、工业互联网、金融、IT 运维监控等领域企业在时序数据的管理上大幅降低人力成本和运营成本。
 
-TDengine 充分利用了时序数据的特点,提出了“一个数据采集点一张表”与“超级表”的概念,设计了创新的存储引擎,让数据的写入、查询和存储效率都得到极大的提升。为正确理解并使用TDengine, 无论如何,请您仔细阅读[基本概念](./concept)一章。
+同时客户可以放心使用无处不在的第三方工具,比如 Prometheus、Telegraf、Grafana 和 MQTT 消息服务器等。TDengine Cloud 还原生支持 Python、Java、Go、Rust 和 Node.js 等连接器,开发者可以选择自己习惯的语言来开发。通过支持 SQL 以及无模式写入的方式,TDengine Cloud 能够满足所有开发者的需求;它还提供了额外的特殊功能来洞察时序数据,使数据的分析和可视化变得极其简单。
 
-如果你是开发者,请一定仔细阅读[开发指南](./develop)一章,该部分对数据库连接、建模、插入数据、查询、连续查询、缓存、数据订阅、用户自定义函数等功能都做了详细介绍,并配有各种编程语言的示例代码。大部分情况下,你只要把示例代码拷贝粘贴,针对自己的应用稍作改动,就能跑起来。
+下面是 TDengine Cloud 的文档结构:
 
-我们已经生活在大数据的时代,纵向扩展已经无法满足日益增长的业务需求,任何系统都必须具有水平扩展的能力,集群成为大数据以及 database 系统的不可缺失功能。TDengine 团队不仅实现了集群功能,而且将这一重要核心功能开源。怎么部署、管理和维护 TDengine 集群,请参考[集群管理](./cluster)一章。
+1. [产品简介](./intro) 概述 TDengine Cloud 的特点、能力和竞争优势。
 
-TDengine 采用 SQL 作为其查询语言,大大降低学习成本、降低迁移成本,但同时针对时序数据场景,又做了一些扩展,以支持插值、降采样、时间加权平均等操作。[SQL 手册](./taos-sql)一章详细描述了 SQL 语法、详细列出了各种支持的命令和函数。
+2. [基本概念](./concept) 主要介绍 TDengine 如何有效利用时间序列数据的特点来提高计算性能,同时提高存储效率。
 
-如果你是系统管理员,关心安装、升级、容错灾备、关心数据导入、导出,配置参数,怎么监测 TDengine 是否健康运行,怎么提升系统运行的性能,那么请仔细参考[运维指南](./operation)一章。
+3. [数据写入](./data-in) 主要介绍 TDengine Cloud 提供的多种将数据写入 TDengine 实例的方式。在数据源部分,您可以方便地从边缘云或者主机上面的 TDengine 把数据写入云上的任何实例。
 
-如果你对 TDengine 外围工具,REST API, 各种编程语言的连接器想做更多详细了解,请看[参考指南](./reference)一章。
+4. [数据输出](./data-out) 主要介绍 TDengine Cloud 提供的极简的时序数据访问方式,通过这些方式,您可以方便地利用 TDengine 实例的数据来开发您的数据分析和可视化应用。
 
-如果你对 TDengine 内部的架构设计很有兴趣,欢迎仔细阅读[技术内幕](./tdinternal)一章,里面对集群的设计、数据分区、分片、写入、读出、查询、聚合查询的流程都做了详细的介绍。如果你想研读 TDengine 代码甚至贡献代码,请一定仔细读完这一章。
+5. [可视化](./visual) 主要介绍您如何使用 TDengine Cloud 上面存储的时序数据进行可视化开发,比如您可以监控和可视化您的 TDengine Cloud 上面的实例和数据库状态。
 
-最后,作为一个开源软件,欢迎大家的参与。如果发现文档的任何错误,描述不清晰的地方,都请在每个页面的最下方,点击“编辑本文档“直接进行修改。
+6. [数据订阅](./data-subscription) 这个部分是 TDengine Cloud 的高级功能,类似于异步发布/订阅能力,即发布到一个主题的消息会被该主题的所有订阅者立即收到通知。TDengine Cloud 的数据订阅让您无需部署任何消息发布/订阅系统,比如 Kafka,就可以创建自己的事件驱动应用。而且我们提供了便捷而安全的方式,让创建主题和分享主题给他人都变得极其容易。
 
-Together, we make a difference!
+7. [流式计算](./stream) 这个部分也是 TDengine Cloud 的另外一个高级功能。通过这个功能,您无需部署任何流式处理系统,比如 Spark/Flink,就能创建连续查询或时间驱动的流计算。TDengine Cloud 的流式计算可以很方便地让您实时处理进入的流式数据,并把它们很轻松地按照您定义的规则导入到目的表里面。
+
+8. [数据复制](./replication) 是 TDengine Cloud 提供的成熟的数据复制功能。您可以从云端同一个区域的一个实例复制到另外一个实例,也可以从一个云服务商的区域复制到另外一个云服务商的区域。
+
+9. [开发指南](./programming) 是使用 TDengine Cloud 上的时序数据开发 IoT 和大数据应用必须阅读的部分。在这一部分中,我们详细介绍了数据库连接,数据建模,数据抽取,数据查询,流式计算,缓存,数据订阅,用户自定义函数和其他功能。我们还提供了各种编程语言的示例代码。在大多数情况下,您只需简单地复制和粘贴这些示例代码,在您的应用程序中再做一些细微修改就能工作。
+
+10. [TDengine SQL](./taos-sql) 提供了标准 SQL 以及 TDengine 扩展部分的详细介绍,通过这些 SQL 语句能方便地进行时序数据分析。
+
+11. 
[工具](./tools)主要介绍 Taos CLI 这个通过终端来执行的命令行工具。 通过运行这个工具,可以轻松和便捷地访问您在 TDengine Cloud 的 TDengine 实例的数据库数据并进行各种查询。另外还介绍了 taosBenchmark 这个工具。通过这个工具可以帮助您用简单的配置就能比较容易地产生大量的数据,并测试 TDengine Cloud 的性能。 + +我们非常高兴您选择 TDengine Cloud 作为您的时序数据平台的一部分,并期待着听到您的反馈以及改进意见,并成为您成功的一个小部分。 diff --git a/docs/zh/02-intro.md b/docs/zh/02-intro.md new file mode 100644 index 0000000000000000000000000000000000000000..17f38f861e9b1ef6ad30c7c57a223c8a28a0adf2 --- /dev/null +++ b/docs/zh/02-intro.md @@ -0,0 +1,101 @@ +--- +sidebar_label: 产品简介 +title: TDengine Cloud 的产品简介 +--- + +TDengine Cloud 是全托管的时序数据处理云服务平台。它是基于开源的时序数据库 TDengine 而开发的。除高性能的时序数据库之外,它还具有缓存、订阅和流计算等系统功能,而且提供了便利而又安全的数据分享、以及众多的企业级服务功能。它可以让物联网、工业互联网、金融、IT 运维监控等领域企业在时序数据的管理上大幅降低人力成本和运营成本。 + +本章节主要介绍 TDengine Cloud 的主要功能,竞争优势和典型使用案例,让大家对 TDengine Cloud 有个整体的了解。 + +## 主要功能 + +TDengine Cloud 的主要功能如下: + +1. 数据写入 + - 支持[使用 SQL 插入数据](../programming/insert/)。 + - 支持 [Telegraf](../data-in/telegraf/)。 + - 支持 [Prometheus](../data-in/prometheus/)。 +2. 数据输出 + - 支持标准 [SQL](../programming/query/),包括子查询。 + - 支持通过工具 [taosDump](../data-out/taosdump/) 导出数据。 + - 支持输出数据到 [Prometheus](../data-out/prometheus/)。 + - 支持通过[数据订阅](../data-subscription/)的方式导出数据. +3. 数据浏览器: 可以浏览数据库和各种表,如果您已经登录,还可以直接执行 SQL 查询语句。 +4. 可视化 + - 支持 [Grafana](../visual/grafana/)。 + - 支持 Google Data Studio。 + - 支持 Grafana Cloud (稍后发布) +5. [数据订阅](../data-subscription/): 用户的应用可以订阅一个数据库,一张表或者一组表。使用的 API 跟 Kafka 基本一致,但是您必须设置具体的过滤条件来定义一个主题,然后您可以和 TDengine Cloud 的其他用户或者用户组分享这个主题。 +6. [流计算](../stream/):不仅支持连续查询,TDengine还支持基于事件驱动的流计算,无需安装 Flink/Spark 就可以处理时序数据。 +7. 企业版 + - 支持每天备份数据。 + - 支持复制一个数据库到另外一个区域或者另外一个云。 + - 支持对等 VPC。 + - 支持 IP 白名单。 +9. 工具 + - 提供一个交互式 [命令行工具 (CLI)](../tools/cli/) 管理和实时查询。 + - 提供一个性能检测工具 [taosBenchmark](../tools/taosbenchmark/) 来测试 TDengine 的性能。 +10. 编程 + - 提供各种[连接器](../programming/connector/),比如 Java,Python,Go,Rust,Node.js 等编程语言。 + - 提供了[REST API](../programming/connector/rest-api/)。 + +更多细节功能,请阅读整个文档。 + +## 竞争优势 + +由于 TDengine Cloud 充分利用了[时序数据特点](https://www.taosdata.com/blog/2019/07/09/105.html),比如结构化、无需事务、很少删除或更新、写多读少等等,还有它云原生的设计使 TDengine Cloud 区别于其他时序数据云服务,具有以下特点: + +- **[极简时序数据平台](https://www.taosdata.com/tdengine/simplified_solution_for_time-series_data_processing)**:全托管的云服务,用户无需担心繁琐的部署、优化、扩容、备份、异地容灾等事务,可全心关注核心业务,减少对DBA的要求,大幅节省人力成本。 + + 除高性能、具有水平扩展能力的时序数据库外, TDengine 云服务还提供: + + **缓存**:无需部署 Redis,应用就能快速的获得最新数据。 + + **数据订阅**:无需部署 Kafka, 当系统接收到新的数据时,应用将立即收到通知。 + + **流式计算**:无需部署 Spark/Flink, 应用就能创建连续查询或时间驱动的流计算。 + +- **[便捷而且安全的数据共享](https://www.taosdata.com/tdengine/cloud/data-sharing)**:TDengine Cloud 既支持将一个库完全开放,设置读或写的权限;也支持通过数据订阅的方式,将库、超级表、一组或一张表、或聚合处理后的数据分享出去。 + + **便捷**:如同在线文档一样简单,只需输入对方邮件地址,设置访问权限和访问时长即可实现分享。对方收到邮件,接受邀请后,可即刻访问。 + + **安全**:访问权限可以控制到一个运行实例、库或订阅的 topic;对于每个授权的用户,对分享的资源,会生成一个访问用的 token;访问可以设置到期时间。 + + 便捷而又安全的时序数据共享,让企业各部门或合作伙伴之间快速洞察业务的运营。 + +- **[安全可靠的企业级服务](https://tdengine.com/tdengine/high-performance-time-series-database/)**:除强大的时序数据管理、共享功能之外,TDengine Cloud 还提供企业运营必需的 + + **可靠**:提供数据定时备份、恢复,数据从运行实例到私有云、其他公有云或 Region 的实时复制。 + + **安全**:提供基于角色的访问权限控制、IP 白名单、用户行为审计等功能。 + + **专业**:提供7*24的专业技术服务,承诺 99.9% 的 Service Level Agreement。 + + 安全、专业、高效可靠的企业级服务,用户无需再为数据管理发愁,可以聚焦自身的核心业务。 + +- **[分析能力](https://www.taosdata.com/tdengine/easy_data_analytics)**:通过超级表、存储计算分离、分区分片、预计算和其它技术,TDengine 能够高效地浏览、格式化和访问数据。 + +- **[核心开源](https://www.taosdata.com/tdengine/open_source_time-series_database)**:TDengine 的核心代码包括集群功能全部在开源协议下公开。全球超过 140k 个运行实例,GitHub Star 20k,且拥有一个活跃的开发者社区。 + +采用 TDengine Cloud,可将典型的物联网、车联网、工业互联网大数据平台的总拥有成本大幅降低。表现在几个方面: + +1. 由于其超强性能,它能将系统所需的计算资源和存储资源大幅降低 +2. 
因为支持 SQL,能与众多第三方软件无缝集成,学习迁移成本大幅下降 +3. 因为是一款极简的时序数据平台,系统复杂度、研发和运营成本大幅降低 + +## 技术生态 + +在整个时序大数据平台中,TDengine 扮演的角色如下: + +
+ +![TDengine Database 技术生态图](eco_system.webp) + +
图 1. TDengine 技术生态图
+
+ +上图中,左侧是各种数据采集或消息队列,包括 OPC-UA、MQTT、Telegraf、也包括 Kafka,他们的数据将被源源不断的写入到 TDengine。右侧则是可视化、BI 工具、组态软件、应用程序。下侧则是 TDengine 自身提供的命令行程序(CLI)以及可视化管理工具。 + +## 典型适用场景 + +作为一个高性能、分布式、支持 SQL 的时序数据库(Database),TDengine 的典型适用场景包括但不限于 IoT、工业互联网、车联网、IT 运维、能源、金融证券等领域。需要指出的是,TDengine 是针对时序数据场景设计的专用数据库和专用大数据处理工具,因其充分利用了时序大数据的特点,它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM 等通用型数据。下面本文将对适用场景做更多详细的分析。 diff --git a/docs/zh/04-concept/_category_.yml b/docs/zh/04-concept/_category_.yml new file mode 100644 index 0000000000000000000000000000000000000000..aad75dce21f63a6510bc0b8da4c93952767adfdf --- /dev/null +++ b/docs/zh/04-concept/_category_.yml @@ -0,0 +1 @@ +label: 基本概念 \ No newline at end of file diff --git a/docs/zh/04-concept/index.md b/docs/zh/04-concept/index.md new file mode 100644 index 0000000000000000000000000000000000000000..2cba68edcd152f5059845b9e25342b3f335f3b8b --- /dev/null +++ b/docs/zh/04-concept/index.md @@ -0,0 +1,183 @@ +--- +sidebar_label: 基本概念 +title: 数据模型和基本概念 +description: TDengine 的数据模型和基本概念 +--- + +为了便于解释基本概念,便于撰写示例程序,整个 TDengine 文档以智能电表作为典型时序数据场景。假设每个智能电表采集电流、电压、相位三个量,有多个智能电表,每个电表有位置 Location 和分组 Group ID 的静态属性. 其采集的数据类似如下的表格: + +
+<table>
+<thead>
+<tr><th rowSpan="2">Device ID</th><th rowSpan="2">Timestamp</th><th colSpan="3">Collected Metrics</th><th colSpan="2">Tags</th></tr>
+<tr><th>current</th><th>voltage</th><th>phase</th><th>location</th><th>groupid</th></tr>
+</thead>
+<tbody>
+<tr><td>d1001</td><td>1538548685000</td><td>10.3</td><td>219</td><td>0.31</td><td>California.SanFrancisco</td><td>2</td></tr>
+<tr><td>d1002</td><td>1538548684000</td><td>10.2</td><td>220</td><td>0.23</td><td>California.SanFrancisco</td><td>3</td></tr>
+<tr><td>d1003</td><td>1538548686500</td><td>11.5</td><td>221</td><td>0.35</td><td>California.LosAngeles</td><td>3</td></tr>
+<tr><td>d1004</td><td>1538548685500</td><td>13.4</td><td>223</td><td>0.29</td><td>California.LosAngeles</td><td>2</td></tr>
+<tr><td>d1001</td><td>1538548695000</td><td>12.6</td><td>218</td><td>0.33</td><td>California.SanFrancisco</td><td>2</td></tr>
+<tr><td>d1004</td><td>1538548696600</td><td>11.8</td><td>221</td><td>0.28</td><td>California.LosAngeles</td><td>2</td></tr>
+<tr><td>d1002</td><td>1538548696650</td><td>10.3</td><td>218</td><td>0.25</td><td>California.SanFrancisco</td><td>3</td></tr>
+<tr><td>d1001</td><td>1538548696800</td><td>12.3</td><td>221</td><td>0.31</td><td>California.SanFrancisco</td><td>2</td></tr>
+</tbody>
+</table>
+表 1. 智能电表数据示例
+
+
+每一条记录都有设备 ID、时间戳、采集的物理量(如上表中的 `current`、`voltage` 和 `phase`)以及每个设备相关的静态标签(`location` 和 `groupid`)。每个设备是受外界的触发,或按照设定的周期采集数据。采集的数据点是时序的,是一个数据流。
+
+## 采集量(Metric)
+
+采集量是指传感器、设备或其他类型采集点采集的物理量,比如电流、电压、温度、压力、GPS 位置等,是随时间变化的,数据类型可以是整型、浮点型、布尔型,也可是字符串。随着时间的推移,存储的采集量的数据量越来越大。智能电表示例中的电流、电压、相位就是采集量。
+
+## 标签(Label/Tag)
+
+标签是指传感器、设备或其他类型采集点的静态属性,不是随时间变化的,比如设备型号、颜色、设备的所在地等,数据类型可以是任何类型。虽然是静态的,但 TDengine 容许用户修改、删除或增加标签值。与采集量不一样的是,随时间的推移,存储的标签的数据量不会有什么变化。智能电表示例中的 `location` 与 `groupid` 就是标签。
+
+## 数据采集点(Data Collection Point)
+
+数据采集点是指按照预设时间周期或受事件触发采集物理量的硬件或软件。一个数据采集点可以采集一个或多个采集量,**但这些采集量都是同一时刻采集的,具有相同的时间戳**。对于复杂的设备,往往有多个数据采集点,每个数据采集点采集的周期都可能不一样,而且完全独立,不同步。比如对于一台汽车,有数据采集点专门采集 GPS 位置,有数据采集点专门采集发动机状态,有数据采集点专门采集车内的环境,这样一台汽车就有三个数据采集点。智能电表示例中的 d1001、d1002、d1003、d1004 等就是数据采集点。
+
+## 表(Table)
+
+因为采集量一般是结构化数据,同时为降低学习门槛,TDengine 采用传统的关系型数据库模型管理数据。用户需要先创建库,然后创建表,之后才能插入或查询数据。
+
+为充分利用其数据的时序性和其他数据特点,TDengine 采取**一个数据采集点一张表**的策略,要求对每个数据采集点单独建表(比如有一千万个智能电表,就需创建一千万张表,上述表格中的 d1001,d1002,d1003,d1004 都需单独建表),用来存储这个数据采集点所采集的时序数据。这种设计有几大优点:
+
+1. 由于不同数据采集点产生数据的过程完全独立,每个数据采集点的数据源是唯一的,一张表也就只有一个写入者,这样就可采用无锁方式来写,写入速度就能大幅提升。
+2. 对于一个数据采集点而言,其产生的数据是按照时间排序的,因此写的操作可用追加的方式实现,进一步大幅提高数据写入速度。
+3. 一个数据采集点的数据是以块为单位连续存储的。如果读取一个时间段的数据,它能大幅减少随机读取操作,成数量级的提升读取和查询速度。
+4. 一个数据块内部,采用列式存储,对于不同数据类型,采用不同压缩算法,而且由于一个数据采集点的采集量的变化是缓慢的,压缩率更高。
+
+如果采用传统的方式,将多个数据采集点的数据写入一张表,由于网络延时不可控,不同数据采集点的数据到达服务器的时序是无法保证的,写入操作是要有锁保护的,而且一个数据采集点的数据是难以保证连续存储在一起的。**采用一个数据采集点一张表的方式,能最大程度地保证单个数据采集点的插入和查询的性能是最优的。**
+
+TDengine 建议用数据采集点的名字(如上表中的 d1001)来做表名。每个数据采集点可能同时采集多个采集量(如上表中的 `current`、`voltage` 和 `phase`),每个采集量对应一张表中的一列,数据类型可以是整型、浮点型、字符串等。除此之外,表的第一列必须是时间戳,即数据类型为 Timestamp。对采集量,TDengine 将自动按照时间戳建立索引,但对采集量本身不建任何索引。数据用列式存储方式保存。
+
+对于复杂的设备,比如汽车,它有多个数据采集点,那么就需要为一辆汽车建立多张表。
+
+## 超级表(STable)
+
+由于一个数据采集点一张表,导致表的数量巨增,难以管理,而且应用经常需要做采集点之间的聚合操作,聚合的操作也变得复杂起来。为解决这个问题,TDengine 引入超级表(Super Table,简称为 STable)的概念。
+
+超级表是指某一特定类型的数据采集点的集合。同一类型的数据采集点,其表的结构是完全一样的,但每个表(数据采集点)的静态属性(标签)是不一样的。描述一个超级表(某一特定类型的数据采集点的集合),除需要定义采集量的表结构之外,还需要定义其标签的 Schema,标签的数据类型可以是整数、浮点数、字符串、JSON,标签可以有多个,可以事后增加、删除或修改。如果整个系统有 N 个不同类型的数据采集点,就需要建立 N 个超级表。
+
+在 TDengine 的设计里,**表用来代表一个具体的数据采集点,超级表用来代表一组相同类型的数据采集点集合**。智能电表示例中,我们可以创建一个超级表 `meters`。
+
+## 子表(Subtable)
+
+当为某个具体数据采集点创建表时,用户可以使用超级表的定义做模板,同时指定该具体采集点(表)的具体标签值来创建该表。**通过超级表创建的表称之为子表**。正常的表与子表的差异在于:
+
+1. 子表就是表,因此所有正常表的 SQL 操作都可以在子表上执行。
+2. 子表在正常表的基础上有扩展,它是带有静态标签的,而且这些标签可以事后增加、删除、修改,而正常的表没有。
+3. 子表一定属于一张超级表,但普通表不属于任何超级表。
+4. 普通表无法转为子表,子表也无法转为普通表。
+
+超级表与基于超级表建立的子表之间的关系表现在:
+
+1. 一张超级表包含有多张子表,这些子表具有相同的采集量 Schema,但带有不同的标签值。
+2. 不能通过子表调整数据或标签的模式,对于超级表的数据模式修改立即对所有的子表生效。
+3. 超级表只定义一个模板,自身不存储任何数据或标签信息。因此,不能向一个超级表写入数据,只能将数据写入子表中。
+
+查询既可以在表上进行,也可以在超级表上进行。针对超级表的查询,TDengine 将把所有子表中的数据视为一个整体数据集进行处理,会先把满足标签过滤条件的表从超级表中找出来,然后再扫描这些表的时序数据,进行聚合操作,这样需要扫描的数据集会大幅减少,从而显著提高查询的性能。本质上,TDengine 通过对超级表查询的支持,实现了多个同类数据采集点的高效聚合。
+
+TDengine 建议为一个数据采集点建表时,通过超级表来建表,而不是建普通表。在智能电表的示例中,我们可以通过超级表 meters 创建子表 d1001、d1002、d1003、d1004 等。
+
+为了更好地理解采集量、标签、超级表与子表的关系,可以参考下面关于智能电表数据模型的示意图。
+
+ +![智能电表数据模型示意图](./supertable.webp) + +
图 1. 智能电表数据模型示意图
+
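+为了更直观地理解上面介绍的超级表与子表模型,下面再给出一段基于 Python 连接器的最小示意代码。这只是一个辅助理解的草例:其中的库名 test、表名 meters/d1001 以及环境变量名都是本文场景下的假设,`RestClient` 的具体用法以开发指南中的连接器文档为准。
+
+```python
+# 概念示意(假设性示例):先建超级表做模板,再为具体采集点建子表并写入数据
+import os
+from taosrest import RestClient
+
+# RestClient 是 taospy 对 REST API 的封装,URL 和令牌沿用本文档约定的环境变量
+client = RestClient(os.environ["TDENGINE_CLOUD_URL"],
+                    token=os.environ["TDENGINE_CLOUD_TOKEN"])
+
+# 超级表只定义模板:采集量的 Schema 加上标签的 Schema,自身不存储数据
+client.sql("CREATE STABLE IF NOT EXISTS test.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupid INT)")
+
+# 一个数据采集点一张子表:以超级表为模板,并指定该采集点的标签值
+client.sql("CREATE TABLE IF NOT EXISTS test.d1001 USING test.meters TAGS ('California.SanFrancisco', 2)")
+
+# 数据写入子表;针对超级表的查询会自动聚合满足标签过滤条件的所有子表
+client.sql("INSERT INTO test.d1001 VALUES (1538548685000, 10.3, 219, 0.31)")
+result = client.sql("SELECT COUNT(*) FROM test.meters WHERE groupid = 2")
+print(result["data"])
+```
+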
+
+## 库(Database)
+
+库是指一组表的集合。TDengine 容许一个运行实例有多个库,而且每个库可以配置不同的存储策略。不同类型的数据采集点往往具有不同的数据特征,包括数据采集频率的高低,数据保留时间的长短,副本的数目,数据块的大小,是否允许更新数据等等。为了让 TDengine 在各种场景下都能最大效率地工作,TDengine 建议将不同数据特征的超级表创建在不同的库里。
+
+一个库里,可以有一到多个超级表,但一个超级表只属于一个库。一个超级表所拥有的子表全部存在一个库里。
+
+## FQDN & Endpoint
+
+FQDN(Fully Qualified Domain Name,完全限定域名)是 Internet 上特定计算机或主机的完整域名。FQDN 由两部分组成:主机名和域名。例如,邮件服务器的 FQDN 可能是 mail.tdengine.com。主机名是 mail,主机位于域名 tdengine.com 中。DNS(Domain Name System)负责将 FQDN 翻译成 IP,是互联网应用的寻址方式。对于没有 DNS 的系统,可以通过配置 hosts 文件来解决。
+
+TDengine 集群的每个节点是由 Endpoint 来唯一标识的,Endpoint 是由 FQDN 外加 Port 组成,比如 h1.tdengine.com:6030。这样当 IP 发生变化的时候,我们依然可以使用 FQDN 来动态找到节点,不需要更改集群的任何配置。而且采用 FQDN,便于内网和外网对同一个集群的统一访问。
+
+TDengine 不建议采用直接的 IP 地址访问集群,不利于管理。不了解 FQDN 概念,请看博文[《一篇文章说清楚 TDengine 的 FQDN》](https://www.taosdata.com/blog/2020/09/11/1824.html)。
diff --git a/docs/zh/04-concept/supertable.webp b/docs/zh/04-concept/supertable.webp
new file mode 100644
index 0000000000000000000000000000000000000000..764b8f3de7ee92a103b2fcd0e75c03773af5ee37
Binary files /dev/null and b/docs/zh/04-concept/supertable.webp differ
diff --git a/docs/zh/07-data-in/02-prometheus.md b/docs/zh/07-data-in/02-prometheus.md
new file mode 100644
index 0000000000000000000000000000000000000000..a4c00de93da72e83349b8b7104863f6f48c14130
--- /dev/null
+++ b/docs/zh/07-data-in/02-prometheus.md
@@ -0,0 +1,72 @@
+---
+sidebar_label: Prometheus
+title: Prometheus
+description: 使用 Prometheus 访问 TDengine
+---
+
+Prometheus 是一款流行的开源监控告警系统。Prometheus 于 2016 年加入了 Cloud Native Computing Foundation(云原生计算基金会,简称 CNCF),成为继 Kubernetes 之后的第二个托管项目,该项目拥有非常活跃的开发人员和用户社区。
+
+Prometheus 提供了 `remote_write` 和 `remote_read` 接口来利用其它数据库产品作为它的存储引擎。为了让 Prometheus 生态圈的用户能够利用 TDengine 的高效写入和查询,TDengine 也提供了对这两个接口的支持。
+
+通过适当的配置,Prometheus 的数据可以通过 `remote_write` 接口存储到 TDengine 中,也可以通过 `remote_read` 接口来查询存储在 TDengine 中的数据,充分利用 TDengine 对时序数据的高效存储查询性能和集群处理能力。
+
+## 前置条件
+
+登录到 TDengine Cloud,在左边的菜单点击“数据浏览器”,然后再点击“数据库”标签旁边的“+”按钮,添加一个名称是“prometheus_data”、使用默认参数的数据库。然后执行 `show databases` SQL 确认数据库确实被成功创建出来。
+
+## 安装 Prometheus
+
+假设您使用的是 amd64 架构的 Linux 操作系统:
+1. 下载
+   ```
+   wget https://github.com/prometheus/prometheus/releases/download/v2.37.0/prometheus-2.37.0.linux-amd64.tar.gz
+   ```
+2. 解压和重命名
+   ```
+   tar xvfz prometheus-*.tar.gz && mv prometheus-2.37.0.linux-amd64 prometheus
+   ```
+3. 改变目录为 prometheus
+   ```
+   cd prometheus
+   ```
+
+然后 Prometheus 就会被安装到当前目录。想了解更多 Prometheus 安装选项,请参考[官方文档](https://prometheus.io/docs/prometheus/latest/installation/)。
+
+## 配置 Prometheus
+
+可以通过编辑 Prometheus 配置文件 `prometheus.yml` 来设置 Prometheus(如果您完全按照上面的步骤执行,您可以在当前目录找到 prometheus.yml 文件)。
+
+```yaml
+remote_write:
+  - url: "/prometheus/v1/remote_write/prometheus_data?token="
+
+remote_read:
+  - url: "/prometheus/v1/remote_read/prometheus_data?token="
+    remote_timeout: 10s
+    read_recent: true
+```
+
+
+您可以使用真实的 TDengine Cloud 的 URL 和令牌来替换上面的``和``。可以通过访问[TDengine Cloud](https://cloud.taosdata.com)来获取真实的值。
+
+
+配置完成后,Prometheus 会从自己的 HTTP 指标端点收集数据并存储到 TDengine Cloud 里面。
+
+## 启动 Prometheus
+
+```
+./prometheus --config.file prometheus.yml
+```
+
+之后 Prometheus 应该已经启动,它同时也启动了一个 Web 服务器。如果您想从浏览器访问这个 Web 服务器,可以根据您的网络环境把 `localhost` 修改为正确的主机名、FQDN 或者 IP 地址。
+
+## 验证远程写入
+
+登录 TDengine Cloud,然后点击左边导航栏的“数据浏览器”。您就会看见由 Prometheus 收集的指标数据。
+![TDengine prometheus remote_write result](prometheus_data.webp)
+
+:::note
+
+- TDengine 会根据一定规则自动为子表生成唯一的子表名。
+:::
diff --git a/docs/zh/07-data-in/03-telegraf.md b/docs/zh/07-data-in/03-telegraf.md
new file mode 100644
index 0000000000000000000000000000000000000000..44c049bc2d0b37f22cea17daf0c34a3ab4a89657
--- /dev/null
+++ b/docs/zh/07-data-in/03-telegraf.md
@@ -0,0 +1,88 @@
+---
+sidebar_label: Telegraf
+title: Telegraf 写入
+description: 使用 Telegraf 向 TDengine 写入数据
+---
+
+Telegraf 是一款十分流行的指标采集开源软件。在数据采集和平台监控系统中,Telegraf 可以采集多种组件的运行信息,而不需要自己手写脚本定时采集,降低了数据获取的难度。
+
+只需要将 Telegraf 的输出配置增加指向 taosAdapter 对应的 url 并修改若干配置项,即可将 Telegraf 的数据写入到 TDengine 中。将 Telegraf 的数据存储到 TDengine 中可以充分利用 TDengine 对时序数据的高效存储查询性能和集群处理能力。
+
+## 前置条件
+
+要将 Telegraf 数据写入 TDengine Cloud,需要首先手动创建一个数据库。登录到 TDengine Cloud,在左边的菜单点击“数据浏览器”,然后再点击“数据库”标签旁边的“+”按钮,添加一个名称是“telegraf”、使用默认参数的数据库。
+
+## 安装 Telegraf
+
+假设您使用的是 Ubuntu 操作系统:
+
+```bash
+{{#include docs/examples/thirdparty/install-telegraf.sh:null:nrc}}
+```
+
+安装结束以后,Telegraf 服务应该已经启动。请先停止它:
+
+```bash
+sudo systemctl stop telegraf
+```
+
+## 配置环境变量
+
+在您的终端命令行里面执行下面的命令来保存 TDengine Cloud 的令牌和 URL 为环境变量:
+
+```bash
+export TDENGINE_CLOUD_URL=""
+export TDENGINE_CLOUD_TOKEN=""
+```
+
+
+您可以使用真实的 TDengine Cloud 的 URL 和令牌来替换上面的``和``。可以通过访问[TDengine Cloud](https://cloud.taosdata.com)来获取真实的值。
+
+
+然后运行下面的命令来生成 telegraf.conf 文件。
+
+```bash
+{{#include docs/examples/thirdparty/gen-telegraf-conf.sh:null:nrc}}
+```
+
+编辑 “outputs.http” 部分:
+
+```toml
+{{#include docs/examples/thirdparty/telegraf-conf.toml:null:nrc}}
+```
+
+配置完成后,Telegraf 会开始收集 CPU 和内存的数据,并发送到 TDengine 的数据库“telegraf”。“telegraf”数据库必须先通过 TDengine Cloud 的数据浏览器创建。
+
+## 启动 Telegraf
+
+使用新生成的 telegraf.conf 文件启动 Telegraf:
+
+```bash
+telegraf --config telegraf.conf
+```
+
+## 验证
+
+通过下面命令检查数据库 telegraf 是否被创建出来:
+
+```sql
+show databases;
+```
+![TDengine show telegraf databases](./telegraf-show-databases.webp)
+
+检查超级表 cpu 和 mem 是否被创建出来:
+
+```sql
+show telegraf.stables;
+```
+
+![TDengine Cloud show telegraf stables](./telegraf-show-stables.webp)
+
+:::note
+
+- TDengine 接收 influxdb 格式数据默认生成的子表名是根据规则生成的唯一 ID 值。用户如需指定生成的表名,可以通过在 taos.cfg 里配置 smlChildTableName 参数来指定。如果通过控制输入数据格式,即可利用 TDengine 这个功能指定生成的表名。举例如下:配置 smlChildTableName=tname,插入数据为 st,tname=cpu1,t1=4 c1=3 1626006833639000000,则创建的表名为 cpu1。如果多行数据 tname 相同,但是后面的 tag_set 不同,则使用第一行自动建表时指定的 tag_set,其他的行会被忽略。详见 [TDengine 无模式写入参考指南](/reference/schemaless/#无模式写入行协议)
+:::
+
+
diff --git a/docs/zh/07-data-in/30-schemaless-influxdb.md b/docs/zh/07-data-in/30-schemaless-influxdb.md
new file mode 100644
index 0000000000000000000000000000000000000000..a3b3d149ac9a328d86312c6bf21c01e269c8b712
--- /dev/null
+++ b/docs/zh/07-data-in/30-schemaless-influxdb.md
@@ -0,0 +1,75 @@
+---
+sidebar_label: InfluxDB 行协议
+title: Schemaless - InfluxDB 行协议
+description: 通过 Schemaless 行协议写入数据
+---
+
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+
+
+这个部分我们会介绍如何通过无模式(Schemaless)InfluxDB 行协议的 REST 接口往 TDengine Cloud 写入数据。
+
+## 配置
+
+在您的终端命令行运行下面的命令来设置 TDengine Cloud 的令牌和 URL 为环境变量:
+
+
+
+
+```bash
+export TDENGINE_CLOUD_TOKEN=""
+export TDENGINE_CLOUD_URL=""
+```
+
+
+
+
+```bash
+set TDENGINE_CLOUD_TOKEN=""
+set TDENGINE_CLOUD_URL=""
+```
+
+
+
+
+```powershell
+$env:TDENGINE_CLOUD_TOKEN=""
+$env:TDENGINE_CLOUD_URL=""
+```
+
+
+
+
+## 插入
+
+您可以使用任何支持 HTTP 协议的客户端通过访问 RESTful 的接口地址 `/influxdb/v1/write` 往 TDengine 里面写入兼容 InfluxDB 的数据。访问地址如下:
+
+```text
+/influxdb/v1/write?db=&token=
+```
+
+支持的 InfluxDB 查询参数如下:
+
+- `db` 指定 TDengine 使用的数据库名
+- `precision` TDengine 使用的时间精度
+  - ns - 纳秒
+  - u - 微秒
+  - ms - 毫秒
+  - s - 秒
+  - m - 分
+  - h - 小时
+
+## 写入样例
+
+```bash
+curl --request POST "$TDENGINE_CLOUD_URL/influxdb/v1/write?db=&token=$TDENGINE_CLOUD_TOKEN&precision=ns" --data-binary "measurement,host=host1 field1=2i,field2=2.0 1577846800001000001"
+```
+
+## 使用 SQL 查询样例
+
+- `measurement` 是超级表名。
+- 您可以像这样通过标签过滤数据:`where host="host1"`。
+
+```bash
+curl -L -d "select * from .measurement where host=\"host1\"" $TDENGINE_CLOUD_URL/rest/sql/test?token=$TDENGINE_CLOUD_TOKEN
+```
diff --git a/docs/zh/07-data-in/35-schemaless-opentsdb-json.md b/docs/zh/07-data-in/35-schemaless-opentsdb-json.md
new file mode 100644
index 0000000000000000000000000000000000000000..66de3990b31251f0ebdd3daf0b66be6927df21ce
--- /dev/null
+++ b/docs/zh/07-data-in/35-schemaless-opentsdb-json.md
@@ -0,0 +1,62 @@
+---
+sidebar_label: OpenTSDB JSON 协议
+title: Schemaless - OpenTSDB JSON 协议
+description: 写入使用 OpenTSDB JSON 协议的数据
+---
+
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+
+
+这个部分我们会介绍如何通过无模式(Schemaless)OpenTSDB JSON 协议的 REST 接口往 TDengine Cloud 写入数据。
+
+
+## 配置
+
+在您的终端命令行运行下面的命令来设置 TDengine Cloud 的令牌和 URL 为环境变量:
+
+
+
+
+```bash
+export TDENGINE_CLOUD_TOKEN=""
+export TDENGINE_CLOUD_URL=""
+```
+
+
+
+
+```bash
+set TDENGINE_CLOUD_TOKEN=""
+set TDENGINE_CLOUD_URL=""
+```
+
+
+
+
+```powershell
+$env:TDENGINE_CLOUD_TOKEN=""
+$env:TDENGINE_CLOUD_URL=""
+```
+
+
+
+## 插入
+
+您可以使用任何支持 HTTP 协议的客户端通过访问 RESTful 的接口地址 `/opentsdb/v1/put` 往 TDengine 里面写入兼容 OpenTSDB 的数据。访问地址如下:
+
+```text
+/opentsdb/v1/put/json/?token=
+```
+
+## 写入样例
+
+```bash
+curl --request POST "$TDENGINE_CLOUD_URL/opentsdb/v1/put/json/?token=$TDENGINE_CLOUD_TOKEN" --data-binary "{\"metric\":\"meter_current\",\"timestamp\":1646846400,\"value\":10.3,\"tags\":{\"groupid\":2,\"location\":\"Beijing\",\"id\":\"d1001\"}}"
+```
+
+## 使用 SQL 查询样例
+
+- `meter_current` 是超级表名。
+- 您可以像这样通过标签过滤数据:`where groupid=2`。
+
+```bash
+curl -L -d "select * from .meter_current where groupid=2" $TDENGINE_CLOUD_URL/rest/sql/test?token=$TDENGINE_CLOUD_TOKEN
+```
diff --git a/docs/zh/07-data-in/38-schemaless-opentsdb-telnet.md b/docs/zh/07-data-in/38-schemaless-opentsdb-telnet.md
new file mode 100644
index 0000000000000000000000000000000000000000..3e748b865d8e242a87d65df1177d00421eb3fb3b
--- /dev/null
+++ b/docs/zh/07-data-in/38-schemaless-opentsdb-telnet.md
@@ -0,0 +1,65 @@
+---
+sidebar_label: OpenTSDB Telnet 协议
+title: Schemaless - OpenTSDB Telnet 协议
+description: 写入使用 OpenTSDB Telnet 协议的数据
+---
+
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+
+
+这个部分我们会介绍如何通过无模式(Schemaless)OpenTSDB Telnet 协议的 REST 接口往 TDengine Cloud 写入数据。
+
+
+## 配置
+
+在您的终端命令行运行下面的命令来设置 TDengine Cloud 的令牌和 URL 为环境变量:
+
+
+
+
+```bash
+export TDENGINE_CLOUD_TOKEN=""
+export TDENGINE_CLOUD_URL=""
+```
+
+
+
+
+```bash
+set TDENGINE_CLOUD_TOKEN=""
+set TDENGINE_CLOUD_URL=""
+```
+
+
+
+
+```powershell
+$env:TDENGINE_CLOUD_TOKEN=""
+$env:TDENGINE_CLOUD_URL=""
+```
+
+
+
+
+## 插入
+
+您可以使用任何支持 HTTP 协议的客户端通过访问 RESTful 的接口地址 `/opentsdb/v1/put` 往 TDengine 里面写入兼容 OpenTSDB 的数据。访问地址如下:
+
+```text
+/opentsdb/v1/put/telnet/?token=
+```
+
+## 写入样例
+
+```bash
+curl --request POST "$TDENGINE_CLOUD_URL/opentsdb/v1/put/telnet/?token=$TDENGINE_CLOUD_TOKEN" --data-binary "sys 1479496100 1.3E0 host=web01 interface=eth0"
+```
+
+## 使用 SQL 查询样例
+
+- `sys` 是超级表名。
+- 您可以像这样通过标签过滤数据:`where host="web01"`。
+
+```bash
+curl -L -d "select * from .sys where host=\"web01\"" $TDENGINE_CLOUD_URL/rest/sql/test?token=$TDENGINE_CLOUD_TOKEN
+```
diff --git a/docs/zh/07-data-in/index.md b/docs/zh/07-data-in/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..9aa2a618d47505ddeab6e4ec945639236f0ca04b
--- /dev/null
+++ b/docs/zh/07-data-in/index.md
@@ -0,0 +1,13 @@
+---
+sidebar_label: 数据写入
+title: 往 TDengine 云服务里面写入数据
+description: 有多种方式可以往 TDengine 里面写入数据。
+---
+
+这章主要介绍往 TDengine 里面写入数据的多种方式:用户可以直接使用 TDengine SQL 往 TDengine Cloud 里面写入数据,也可以通过编程的方式使用 TDengine 提供的[连接器(Connector)](../programming/connector)往 TDengine 里面写入数据。TDengine 还提供压力测试工具 [taosBenchmark](../tools/taosbenchmark) 往 TDengine 里面写入数据,另外 TDengine 企业版还提供工具 taosX,可以从一个 TDengine Cloud 实例同步数据到另外一个。
+
+此外,通过第三方工具 Telegraf 和 Prometheus,也可以往 TDengine 写入数据。
+
+:::note
+由于权限的限制,您必须首先在云服务的数据浏览器里面创建数据库,然后才能往这个数据库里面写入数据。无论采用哪种写入方式,都必须先完成这一步。
+:::
diff --git a/docs/zh/07-data-in/prometheus_data.webp b/docs/zh/07-data-in/prometheus_data.webp
new file mode 100644
index 0000000000000000000000000000000000000000..760890656daae09b9127d52c03486ce9b2bb0913
Binary files /dev/null and b/docs/zh/07-data-in/prometheus_data.webp differ
diff --git a/docs/zh/07-data-in/prometheus_read.webp b/docs/zh/07-data-in/prometheus_read.webp
new file mode 100644
index 0000000000000000000000000000000000000000..2c91aa6fb8df897effddb4bfab3d522b6975ed1a
Binary files /dev/null and b/docs/zh/07-data-in/prometheus_read.webp differ
diff --git a/docs/zh/07-data-in/telegraf-show-databases.webp b/docs/zh/07-data-in/telegraf-show-databases.webp
new file mode 100644
index 0000000000000000000000000000000000000000..8970f074eee3980d313de100218cbfaff84fbef0
Binary files /dev/null and b/docs/zh/07-data-in/telegraf-show-databases.webp differ
diff --git a/docs/zh/07-data-in/telegraf-show-stables.webp b/docs/zh/07-data-in/telegraf-show-stables.webp
new file mode 100644
index 0000000000000000000000000000000000000000..7f93f0cb8d1d8d10c0cc5fcf0c0a895df8f0edb9
Binary files /dev/null and b/docs/zh/07-data-in/telegraf-show-stables.webp differ
diff --git a/docs/zh/09-data-out/04-taosdump.md b/docs/zh/09-data-out/04-taosdump.md
new file mode 100644
index 0000000000000000000000000000000000000000..1a66a2d4a5be9933aa1d1881b592c833d0335a0b
--- /dev/null
+++ b/docs/zh/09-data-out/04-taosdump.md
@@ -0,0 +1,122 @@
+---
+sidebar_label: taosDump
+title: 使用 taosDump 来备份数据
+description: "taosdump 是一个支持从运行中的 TDengine 集群备份数据并将备份的数据恢复到相同或另一个运行中的 TDengine 集群中的工具应用程序"
+---
+
+## 简介
+
+taosdump 是一个支持从运行中的 TDengine 集群备份数据,并将备份的数据恢复到相同或另一个运行中的 TDengine 集群中的工具应用程序。
+
+taosdump 可以用数据库、超级表或普通表作为逻辑数据单元进行备份,也可以对数据库、超级表和普通表中指定时间段内的数据记录进行备份。使用时可以指定数据备份的目录路径,如果不指定位置,taosdump 默认会将数据备份到当前目录。
+
+如果指定的位置已经有数据文件,taosdump 会提示用户并立即退出,避免数据被覆盖。这意味着同一路径只能被用于一次备份。如果看到相关提示,请小心操作。
+
+taosdump 是一个逻辑备份工具,它不应被用于备份任何原始数据、环境设置、硬件信息、服务端配置或集群的拓扑结构。taosdump 使用 [Apache AVRO](https://avro.apache.org/) 作为数据文件格式来存储备份数据。
+
+## Installation
+
+Please refer to [Install taosTools](https://docs.tdengine.com/cloud/tools/taosdump/#installation).
+
+## Common usage scenarios
+
+### taosdump backup data
+
+1. backing up all databases: specify `-A` or `-all-databases` parameter.
+2. backup multiple specified databases: use `-D db1,db2,...` parameters;
+3. back up some super or normal tables in the specified database: use `-dbname stbname1 stbname2 tbname1 tbname2 ...` parameters. Note that the first parameter of this input sequence is the database name, and only one database is supported. 
The second and subsequent parameters are the names of super or normal tables in that database, separated by spaces. +4. back up the system log database: TDengine clusters usually contain a system database named `log`. The data in this database is the data that TDengine runs itself, and the taosdump will not back up the log database by default. If users need to back up the log database, users can use the `-a` or `-allow-sys` command-line parameter. +5. Loose mode backup: taosdump version 1.4.1 onwards provides `-n` and `-L` parameters for backing up data without using escape characters and "loose" mode, which can reduce the number of backups if table names, column names, tag names do not use escape characters. This can also reduce the backup data time and backup data footprint. If you are unsure about using `-n` and `-L` conditions, please use the default parameters for "strict" mode backup. See the [official documentation](https://docs.tdengine.com/taos-sql/escape/) for a description of escaped characters. + + +:::tip + +- taosdump versions after 1.4.1 provide the `-I` argument for parsing Avro file schema and data. If users specify `-s` then only taosdump will parse schema. +- Backups after taosdump 1.4.2 use the batch count specified by the `-B` parameter. The default value is 16384. If, in some environments, low network speed or disk performance causes "Error actual dump ... batch ...", then try changing the `-B` parameter to a smaller value. + +::: + + +### taosdump recover data + +Restore the data file in the specified path: use the `-i` parameter plus the path to the data file. You should not use the same directory to backup different data sets, and you should not backup the same data set multiple times in the same path. Otherwise, the backup data will cause overwriting or multiple backups. + + +:::tip +taosdump internally uses TDengine stmt binding API for writing recovery data with a default batch size of 16384 for better data recovery performance. If there are more columns in the backup data, it may cause a "WAL size exceeds limit" error. You can try to adjust the batch size to a smaller value by using the `-B` parameter. + +::: + + +## Detailed command-line parameter list + +The following is a detailed list of taosdump command-line arguments. + +``` +Usage: taosdump [OPTION...] dbname [tbname ...] + or: taosdump [OPTION...] --databases db1,db2,... + or: taosdump [OPTION...] --all-databases + or: taosdump [OPTION...] -i inpath + or: taosdump [OPTION...] -o outpath + + -h, --host=HOST Server host from which to dump data. Default is + localhost. + -p, --password User password to connect to server. Default is + taosdata. + -P, --port=PORT Port to connect + -u, --user=USER User name used to connect to server. Default is + root. + -c, --config-dir=CONFIG_DIR Configure directory. Default is /etc/taos + -i, --inpath=INPATH Input file path. + -o, --outpath=OUTPATH Output file path. + -r, --resultFile=RESULTFILE DumpOut/In Result file path and name. + -a, --allow-sys Allow to dump system database + -A, --all-databases Dump all databases. + -D, --databases=DATABASES Dump listed databases. Use comma to separate + database names. + -N, --without-property Dump database without its properties. + -s, --schemaonly Only dump table schemas. + -y, --answer-yes Input yes for prompt. It will skip data file + checking! + -d, --avro-codec=snappy Choose an avro codec among null, deflate, snappy, + and lzma. + -S, --start-time=START_TIME Start time to dump. 
Either epoch or + ISO8601/RFC3339 format is acceptable. ISO8601 + format example: 2017-10-01T00:00:00.000+0800 or + 2017-10-0100:00:00:000+0800 or '2017-10-01 + 00:00:00.000+0800' + -E, --end-time=END_TIME End time to dump. Either epoch or ISO8601/RFC3339 + format is acceptable. ISO8601 format example: + 2017-10-01T00:00:00.000+0800 or + 2017-10-0100:00:00.000+0800 or '2017-10-01 + 00:00:00.000+0800' + -B, --data-batch=DATA_BATCH Number of data per query/insert statement when + backup/restore. Default value is 16384. If you see + 'error actual dump .. batch ..' when backup or if + you see 'WAL size exceeds limit' error when + restore, please adjust the value to a smaller one + and try. The workable value is related to the + length of the row and type of table schema. + -I, --inspect inspect avro file content and print on screen + -L, --loose-mode Use loose mode if the table name and column name + use letter and number only. Default is NOT. + -n, --no-escape No escape char '`'. Default is using it. + -T, --thread-num=THREAD_NUM Number of thread for dump in file. Default is + 5. + -C, --cloud=CLOUD_DSN specify a DSN to access TDengine cloud service + -R, --restful Use RESTful interface to connect TDengine + -g, --debug Print debug info. + -?, --help Give this help list + --usage Give a short usage message + -V, --version Print program version + +Mandatory or optional arguments to long options are also mandatory or optional +for any corresponding short options. + +Report bugs to . +``` diff --git a/docs/zh/09-data-out/05-prometheus.md b/docs/zh/09-data-out/05-prometheus.md new file mode 100644 index 0000000000000000000000000000000000000000..ed7ae626261f9ba754f5104f89041ec7ae421c52 --- /dev/null +++ b/docs/zh/09-data-out/05-prometheus.md @@ -0,0 +1,34 @@ +--- +sidebar_label: Prometheus +title: Prometheus remote read +description: Prometheus remote_read from TDengine cloud server +--- + +Prometheus is a widespread open-source monitoring and alerting system. Prometheus joined the Cloud Native Computing Foundation (CNCF) in 2016 as the second incubated project after Kubernetes, which has a very active developer and user community. + +Prometheus provides `remote_read` interface to leverage other database products as its storage engine. To enable users of the Prometheus ecosystem to take advantage of TDengine's efficient querying, TDengine also provides support for this interface so that data stored in TDengine can be queried via the `remote_read` interface, taking full advantage of TDengine's efficient query performance and clustering capabilities for time-series data. + +## Install Prometheus + +Please refer to [Install Prometheus](https://docs.tdengine.com/cloud/data-in/prometheus#install-prometheus). + +## Configure Prometheus + +Please refer to [Configure Prometheus](https://docs.tdengine.com/cloud/prometheus/#configure-prometheus). + +## Start Prometheus + +Please refer to [Start Prometheus](https://docs.tdengine.com/cloud/data-in/prometheus/#start-prometheus). + +## Verify Remote Read + +Lets retrieve some metrics from TDengine Cloud via prometheus web server. Browse to and use the "Graph" tab. 
+
+Enter the following expression to graph the per-second rate of chunks being created in the self-scraped Prometheus:
+
+```
+rate(prometheus_tsdb_head_chunks_created_total[1m])
+```
+
+![TDengine prometheus remote_read](prometheus_read.webp)
+
diff --git a/docs/zh/09-data-out/index.md b/docs/zh/09-data-out/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..2cb3b928c5641b03b3d70df5c83ce1817114fd45
--- /dev/null
+++ b/docs/zh/09-data-out/index.md
@@ -0,0 +1,7 @@
+---
+sidebar_label: 数据输出
+title: 数据输出
+description: TDengine Cloud 的多种数据输出方式。
+---
+
+这一章介绍如何从 TDengine 云服务获取数据。除了使用 SQL 进行常规查询之外,您还可以使用 TDengine 云服务提供的[数据订阅](../data-subscription)来订阅数据,并把存储在 TDengine 里面的数据分享给其他人。TDengine 为应用开发者提供了[连接器](../programming/connector)来访问存储在 TDengine 里面的数据,还提供了一些工具:[taosdump](../tools/taosdump) 可以把存储在 TDengine 云服务里面的数据导出到文件中,`taosX` 则可以把一个 TDengine 云服务里面的数据同步到另外一个。此外,也可以通过第三方工具,比如 Prometheus,来读取 TDengine 里面的数据。
diff --git a/docs/zh/09-data-out/prometheus_data.webp b/docs/zh/09-data-out/prometheus_data.webp
new file mode 100644
index 0000000000000000000000000000000000000000..760890656daae09b9127d52c03486ce9b2bb0913
Binary files /dev/null and b/docs/zh/09-data-out/prometheus_data.webp differ
diff --git a/docs/zh/09-data-out/prometheus_read.webp b/docs/zh/09-data-out/prometheus_read.webp
new file mode 100644
index 0000000000000000000000000000000000000000..2c91aa6fb8df897effddb4bfab3d522b6975ed1a
Binary files /dev/null and b/docs/zh/09-data-out/prometheus_read.webp differ
diff --git a/docs/zh/10-programming/01-connect/01-python.md b/docs/zh/10-programming/01-connect/01-python.md
new file mode 100644
index 0000000000000000000000000000000000000000..4f5d0baf4223c2725400cf9158e658ad21ecefa1
--- /dev/null
+++ b/docs/zh/10-programming/01-connect/01-python.md
@@ -0,0 +1,130 @@
+---
+sidebar_label: Python
+title: 使用 Python 连接器建立连接
+description: 使用 Python 连接器建立和 TDengine Cloud 的连接
+---
+
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+
+## 安装连接器
+
+首先,您需要安装版本大于 `2.6.2` 的 `taospy` 模块,然后在终端里面执行下面的命令。
+
+
+
+
+```bash
+pip3 install -U taospy[ws]
+```
+
+您必须首先安装 Python3。
+
+
+
+
+```bash
+conda install -c conda-forge taospy taospyws
+```
+
+
+
+
+## 配置
+
+在您的终端里面执行下面的命令来保存 TDengine Cloud 的 URL 和令牌到系统的环境变量里面:
+
+
+
+
+```bash
+export TDENGINE_CLOUD_TOKEN=""
+export TDENGINE_CLOUD_URL=""
+```
+
+
+
+
+```shell
+set TDENGINE_CLOUD_TOKEN=
+set TDENGINE_CLOUD_URL=
+```
+
+
+
+
+```powershell
+$env:TDENGINE_CLOUD_TOKEN=''
+$env:TDENGINE_CLOUD_URL=''
+```
+
+
+
+
+另外您也可以在您的 IDE 的运行配置里面设置这些环境变量。
+
+
+:::note
+替换 为 TDengine Cloud 的令牌和 URL。
+获取 TDengine Cloud 的令牌和 URL,可以登录 [TDengine Cloud](https://cloud.taosdata.com) 后点击左边的“编程”菜单,然后选择“Python”。
+:::
+
+
+## 建立连接
+
+复制下面的代码到您的编辑器,然后执行这段代码。如果您正在使用 Jupyter 并且按照它的指南搭建好环境,您可以复制下面的代码到您浏览器的 Jupyter 编辑器里执行。
+
+```python
+{{#include docs/examples/python/develop_tutorial.py:connect}}
+```
+
+对于如何写入数据和查询数据,请参考。
+
+想知道更多通过 REST 接口写入数据的详情,请参考 [REST 接口](https://docs.taosdata.com/cloud/programming/connector/rest-api/)。
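+
+作为补充,下面给出一个连接建立之后的最小连通性检查草例(假设:环境变量已按上文配置;`server_info` 属性用于读取服务端版本,具体以连接器参考文档为准):
+
+```python
+import os
+import taosrest
+
+# 使用上文配置的环境变量,通过 REST 接口建立连接
+conn = taosrest.connect(url=os.environ["TDENGINE_CLOUD_URL"],
+                        token=os.environ["TDENGINE_CLOUD_TOKEN"])
+
+# 打印服务端版本,确认连接可用
+print("server version:", conn.server_info)
+```
+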
+ +## Jupyter + +### 步骤一:安装模块 + +对于熟悉使用 Jupyter 来进行 Python 编程的用户,在您的环境中必须准备好 TDengine 的 Python 连接器和 Jupyter。如果您还没有这样做,请使用下面的命令来安装他们。 + + + + +```bash +pip install jupyterlab +pip3 install -U taospy[ws] +``` + +您接下来需要安装 Python3 。 + + + + +``` +conda install -c conda-forge jupyterlab +conda install -c conda-forge taospy +``` + + + + +### 步骤二:配置 + +在使用 Jupyter 和 TDengine Cloud 连接连接之前,需要在环境变量设置按照下面内容设置,然后再启动 Jupyter。我们使用 Linux 脚本作为例子。 + +```bash +export TDENGINE_CLOUD_TOKEN="" +export TDENGINE_CLOUD_URL="" +jupyter lab +``` + +### 步骤三:建立连接 + +一旦 Jupter lab 启动成功,Jupyter lab 服务就会自动和 TDengine Cloud 连接并且显示在浏览器里面。您可以创建一个新的 notebook 页面,然后复制下面的样例代码到这个页面中并运行。 + +```python +{{#include docs/examples/python/develop_tutorial.py:connect}} +``` diff --git a/docs/zh/10-programming/01-connect/02-java.md b/docs/zh/10-programming/01-connect/02-java.md new file mode 100644 index 0000000000000000000000000000000000000000..073829eca3ae7461191748e8166b6a6e586e8b85 --- /dev/null +++ b/docs/zh/10-programming/01-connect/02-java.md @@ -0,0 +1,80 @@ +--- +sidebar_label: Java +title: 使用 Java 连接器建立连接 +description: 使用 Java 连接器建立和 TDengine Cloud 的连接 +--- + + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +## 增加依赖包 + + + + + +```xml title="pom.xml" +{{#include docs/examples/java/pom.xml:dep}} +``` + + + + +```groovy title="build.gradle" +dependencies { + implementation 'com.taosdata.jdbc:taos-jdbcdriver:3.0.0.0' +} +``` + + + + +## 配置 + +在您的终端里面执行下面的命令设置 JDBC URL 为环境变量: + + + + +```bash +export TDENGINE_JDBC_URL="" +``` + + + +```shell +set TDENGINE_JDBC_URL= +``` + + + +```powershell +$env:TDENGINE_JDBC_URL='' +``` + + + + + +另外,您也可以在您的 IDE 的运行配置里设置环境变量 + + +:::note +替换 为 真实的 JDBC URL,比如 `jdbc:TAOS-RS://example.com?usessl=true&token=xxxx`。 + +获取真实的 JDBC URL 的值,请登录[TDengine Cloud](https://cloud.taosdata.com) 后点击左边的”编程“菜单,然后选择”Java“。 +::: + +## 建立连接 + +下面的代码是先从环境变量里面创建 JDBC URL ,然后创建 `Connection` 这个 JDBC 连接标准对象。 + +```java +{{#include docs/examples/java/src/main/java/com/taos/example/ConnectCloudExample.java:connect}} +``` + +客户端连接建立连接以后,想了解更多写入数据和查询数据的内容,请参考 and . + +想知道更多通过 REST 接口写入数据的详情,请参考[REST 接口](https://docs.taosdata.com/cloud/programming/connector/rest-api/). diff --git a/docs/zh/10-programming/01-connect/03-go.md b/docs/zh/10-programming/01-connect/03-go.md new file mode 100644 index 0000000000000000000000000000000000000000..8d87ba63dfdb6fdd2bf6e9fec6d5f3f411f35475 --- /dev/null +++ b/docs/zh/10-programming/01-connect/03-go.md @@ -0,0 +1,85 @@ +--- +sidebar_label: Go +title: 使用 Go 连接器建立连接 +description: 使用 Go 连接器建立和 TDengine Cloud 的连接 +--- + + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +## 初始化模块 + +``` +go mod init tdengine.com/example +``` + +## 增加模块依赖 + +在 `go.mod` 文件中增加 `driver-go` 依赖。 + +```go-mod title="go.mod" +{{#include docs/examples/go/connectexample/go.mod}} +``` + +## 配置 + +在您的终端里面执行下面的命令设置 DSN(数据源名称) 为系统环境变量: + + + + +```bash +export TDENGINE_GO_DSN="" +``` + + + + +```bash +set TDENGINE_GO_DSN= +``` + + + + +```powershell +$env:TDENGINE_GO_DSN='' +``` + + + + + + +:::note +替换 为 真实的值,格式应该是 `https()/?token=`。 + +获取真实的 `goDSN` 的值,请登录[TDengine Cloud](https://cloud.taosdata.com) 后点击左边的”编程“菜单,然后选择”Go“。 +::: + + +## 建立连接 + +复制代码到 main.go 文件。 + +```go title="main.go" +{{#include docs/examples/go/connectexample/main.go}} +``` + +执行下面的命令下载相关依赖: + +```bash +go mod tidy +``` + +最后运行下面命令测试连接: + +```bash +go run main.go +``` + +客户端连接建立连接以后,想了解更多写入数据和查询数据的内容,请参考 and . 
+ +想知道更多通过 REST 接口写入数据的详情,请参考[REST 接口](https://docs.taosdata.com/cloud/programming/connector/rest-api/). diff --git a/docs/zh/10-programming/01-connect/04-rust.md b/docs/zh/10-programming/01-connect/04-rust.md new file mode 100644 index 0000000000000000000000000000000000000000..ac3ff1d00b206c45425f15df67bba0cefa3efbc1 --- /dev/null +++ b/docs/zh/10-programming/01-connect/04-rust.md @@ -0,0 +1,80 @@ +--- +sidebar_label: Rust +title: 使用 Rust 连接器建立连接 +description: 使用 Rust 连接器建立和 TDengine Cloud 的连接 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +## 创建项目 + +```bash +cargo new --bin cloud-example +``` + +## 增加依赖 + +在 `Cargo.toml` 文件中增加下面的依赖: + +```toml title="Cargo.toml" +[package] +name = "cloud-example" +version = "0.1.0" +edition = "2021" + +[dependencies] +taos = { version = "*", default-features = false, features = ["ws"] } +tokio = { version = "1", features = ["full"]} +anyhow = "1.0.0" +``` + +## 配置 + +在您的终端里面执行下面的命令设置 TDengine Cloud 的 DSN 为环境变量: + + + + +```bash +export TDENGINE_CLOUD_DSN="" +``` + + + + +```bash +set TDENGINE_CLOUD_DSN= +``` + + + + +```powershell +$env:TDENGINE_CLOUD_DSN='' +``` + + + + + +:::note +替换 为 真实的值,格式应该是 `https()/?token=`。 + +获取真实的 `DSN` 的值,请登录[TDengine Cloud](https://cloud.taosdata.com) 后点击左边的”编程“菜单,然后选择”Rust“。 + +::: + + +## 建立连接 + +复制下面的代码到 `main.rs` 文件。 + +```rust title="main.rs" +{{#include docs/examples/rust/cloud-example/src/main.rs}} +``` + +客户端连接建立连接以后,想了解更多写入数据和查询数据的内容,请参考 and . + +想知道更多通过 REST 接口写入数据的详情,请参考[REST 接口](https://docs.taosdata.com/cloud/programming/connector/rest-api/). diff --git a/docs/zh/10-programming/01-connect/05-node.md b/docs/zh/10-programming/01-connect/05-node.md new file mode 100644 index 0000000000000000000000000000000000000000..3f36f6bcfa7f2b2413d27059d243f65cecfd8bb6 --- /dev/null +++ b/docs/zh/10-programming/01-connect/05-node.md @@ -0,0 +1,63 @@ +--- +sidebar_label: Node.js +title: 使用 Node.js 连接器建立连接 +description: 使用 Node.js 连接器建立和 TDengine Cloud 的连接 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +## 安装连接器 + +```bash +npm install @tdengine/rest +``` +## 配置 + +在您的终端里面执行下面的命令设置 TDengine Cloud 令牌为环境变量: + + + + +```bash +export TDENGINE_CLOUD_TOKEN="" +export TDENGINE_CLOUD_URL="" +``` + + + + +```bash +set TDENGINE_CLOUD_TOKEN= +set TDENGINE_CLOUD_URL= +``` + + + + +```powershell +$env:TDENGINE_CLOUD_TOKEN='' +$env:TDENGINE_CLOUD_URL='' +``` + + + + + + +:::note +替换 为 TDengine Cloud 的令牌和 URL 。 +获取 TDengine Cloud 的令牌和 URL,可以登录[TDengine Cloud](https://cloud.taosdata.com) 后点击左边的”编程“菜单,然后选择”Node.js“。 +::: + + +## Connect + +```javascript +{{#include docs/examples/node/connect.js}} +``` + +客户端连接建立连接以后,想了解更多写入数据和查询数据的内容,请参考 and . + +想知道更多通过 REST 接口写入数据的详情,请参考[REST 接口](https://docs.taosdata.com/cloud/programming/connector/rest-api/). 
diff --git a/docs/zh/10-programming/01-connect/06-csharp.md b/docs/zh/10-programming/01-connect/06-csharp.md new file mode 100644 index 0000000000000000000000000000000000000000..c434b323bba11da4f924b57678b67d5b98a828b2 --- /dev/null +++ b/docs/zh/10-programming/01-connect/06-csharp.md @@ -0,0 +1,90 @@ +--- +sidebar_label: C# +title: 使用 C# 连接器建立连接 +description: 使用 C# 连接器建立和 TDengine Cloud 的连接 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +## 创建项目 + +```bash +dotnet new console -o example +``` + +## Add C# TDengine Driver class lib + +```bash +cd example +vim example.csproj +``` + +增加下面的 ItemGroup 和 Task 配置到您的工程文件中。 + +```XML + + + + + + + + + +``` + +```bash +dotnet add package TDengine.Connector +``` + +## Config + +在您的终端里面执行下面的命令设置 TDengine Cloud 令牌为环境变量: + + + + +```bash +export TDENGINE_CLOUD_DSN="" +``` + + + + +```bash +set TDENGINE_CLOUD_DSN= +``` + + + + +```powershell +$env:TDENGINE_CLOUD_DSN='' +``` + + + + + +:::note +替换 为 真实的值,格式应该是 `https()/?token=`。 + +获取真实的 `DSN` 的值,请登录[TDengine Cloud](https://cloud.taosdata.com) 后点击左边的”编程“菜单,然后选择”C#“。 +::: + + +## 建立连接 + +``` XML +{{#include docs/examples/csharp/cloud-example/connect/connect.csproj}} +``` + +```C# +{{#include docs/examples/csharp/cloud-example/connect/Program.cs}} +``` + +客户端连接建立连接以后,想了解更多写入数据和查询数据的内容,请参考 and . + +想知道更多通过 REST 接口写入数据的详情,请参考[REST 接口](https://docs.taosdata.com/cloud/programming/connector/rest-api/). diff --git a/docs/zh/10-programming/01-connect/09-rest-api.md b/docs/zh/10-programming/01-connect/09-rest-api.md new file mode 100644 index 0000000000000000000000000000000000000000..d7805aaec197c68825526ba84ee0d5d65d2b94b9 --- /dev/null +++ b/docs/zh/10-programming/01-connect/09-rest-api.md @@ -0,0 +1,57 @@ +--- +sidebar_label: REST API +title: REST API +description: 使用 RESTful 接口建立和 TDengine Cloud 的连接 +--- + + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +## 配置 + +在您的终端里面执行下面的命令来保存 TDengine Cloud 的 URL 和令牌到系统的环境变量里面: + + + + +```bash +export TDENGINE_CLOUD_TOKEN="" +export TDENGINE_CLOUD_URL="" +``` + + + + +```bash +set TDENGINE_CLOUD_TOKEN= +set TDENGINE_CLOUD_URL= +``` + + + + +```powershell +$env:TDENGINE_CLOUD_TOKEN='' +$env:TDENGINE_CLOUD_URL='' +``` + + + + + +:::note +替换 为 TDengine Cloud 的令牌和 URL 。 +获取 TDengine Cloud 的令牌和 URL,可以登录[TDengine Cloud](https://cloud.taosdata.com) 后点击左边的”编程“菜单,然后选择”REST API“。 +::: + +## 使用 + +TDengine REST API 是使用标准的 HTTP 协议并提供一直简易的方式访问 TDengine 实例。比如下面的命令,通过 URL,令牌和 SQL 命令来组装 HTTP 请求,并使用命令行工具 `curl` 来运行这个命令。 + +```bash +curl -L \ + -d "select name, ntables, status from information_schema.ins_databases;" \ + $TDENGINE_CLOUD_URL/rest/sql?token=$TDENGINE_CLOUD_TOKEN +``` diff --git a/docs/zh/10-programming/01-connect/_category_.yml b/docs/zh/10-programming/01-connect/_category_.yml new file mode 100644 index 0000000000000000000000000000000000000000..f75d563ac9e061f8b8d66392031413f4051e351e --- /dev/null +++ b/docs/zh/10-programming/01-connect/_category_.yml @@ -0,0 +1 @@ +label: 建立连接 diff --git a/docs/zh/10-programming/01-connect/index.md b/docs/zh/10-programming/01-connect/index.md new file mode 100644 index 0000000000000000000000000000000000000000..e5d6dbf99624f4f14160fbe8b429ef6db3f81c56 --- /dev/null +++ b/docs/zh/10-programming/01-connect/index.md @@ -0,0 +1,14 @@ +--- +sidebar_label: 快速开始 +title: 建立与 TDengine Cloud 的连接 +description: 使用连接器建立与 TDengine Cloud 的连接 +--- + +这个部分主要是讲述如何使用 TDengine 提供的丰富连接器与 TDengine Cloud 建立连接,这样用户就能快速开发自己的应用 + +```mdx-code-block +import DocCardList from '@theme/DocCardList'; 
+import {useCurrentSidebarCategory} from '@docusaurus/theme-common';
+
+
+```
diff --git a/docs/zh/10-programming/02-model.md b/docs/zh/10-programming/02-model.md
new file mode 100644
index 0000000000000000000000000000000000000000..276860203442175535bda5f91fa92d55a5cde73e
--- /dev/null
+++ b/docs/zh/10-programming/02-model.md
@@ -0,0 +1,72 @@
+---
+sidebar_label: 数据建模
+title: TDengine 数据建模
+description: TDengine 中如何建立数据模型
+---
+
+TDengine 采用类关系型数据模型,需要建库、建表。因此对于一个具体的应用场景,需要考虑库、超级表和普通表的设计。本节不讨论细致的语法规则,只介绍概念。
+
+## 创建库
+
+不同类型的数据采集点往往具有不同的数据特征,包括数据采集频率的高低,数据保留时间的长短,副本的数目,数据块的大小,是否允许更新数据等等。为了在各种场景下 TDengine 都能以最大效率工作,TDengine 建议将不同数据特征的表创建在不同的库里,因为每个库可以配置不同的存储策略。创建一个库时,除 SQL 标准的选项外,还可以指定保留时长、副本数、缓存大小、时间精度、文件块里最大最小记录条数、是否压缩、一个数据文件覆盖的天数等多种参数。
+
+![TDengine create-database](./create-database.png)
+
+详细的语法及参数请见 [数据库管理](/taos-sql/database) 章节。
+
+:::note
+
+- 任何一张表或超级表必须属于某个库,在创建表之前,必须先创建库。
+- 创建并插入记录、查询历史记录的时候,均需要指定时间戳。
+
+:::
+
+## 创建超级表
+
+一个物联网系统,往往存在多种类型的设备,比如对于电网,存在智能电表、变压器、母线、开关等等。为便于多表之间的聚合,使用 TDengine, 需要对每个类型的数据采集点创建一个超级表。以 [表 1](/tdinternal/arch#model_table1) 中的智能电表为例,可以使用如下的 SQL 命令创建超级表:
+
+```sql
+CREATE STABLE power.meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);
+```
+
+与创建普通表一样,创建超级表时,需要提供表名(示例中为 meters),表结构 Schema,即数据列的定义。第一列必须为时间戳(示例中为 ts),其他列为采集的物理量(示例中为 current, voltage, phase),数据类型可以为整型、浮点型、字符串等。除此之外,还需要提供标签的 Schema (示例中为 location, groupId),标签的数据类型可以为整型、浮点型、字符串等。采集点的静态属性往往可以作为标签,比如采集点的地理位置、设备型号、设备组 ID、管理员 ID 等等。标签的 Schema 可以事后增加、删除、修改。具体定义以及细节请见 [TDengine SQL 的超级表管理](/taos-sql/stable) 章节。
+
+每一种类型的数据采集点需要建立一个超级表,因此一个物联网系统,往往会有多个超级表。对于电网,我们就需要对智能电表、变压器、母线、开关等都建立一个超级表。在物联网中,一个设备就可能有多个数据采集点(比如一台风力发电的风机,有的采集点采集电流、电压等电参数,有的采集点采集温度、湿度、风向等环境参数),这个时候,对这一类型的设备,需要建立多张超级表。
+
+一张超级表最多容许 4096 列,如果一个采集点采集的物理量个数超过 4096,需要建多张超级表来处理。一个系统可以有多个 Database,一个 Database 里可以有一到多个超级表。
+
+## 创建表
+
+TDengine 对每个数据采集点需要独立建表。与标准的关系型数据库一样,一张表有表名,Schema,但除此之外,还可以带有一到多个标签。创建时,需要使用超级表做模板,同时指定标签的具体值。以 [表 1](/tdinternal/arch#model_table1) 中的智能电表为例,可以使用如下的 SQL 命令建表:
+
+```sql
+CREATE TABLE power.d1001 USING meters TAGS ("California.SanFrancisco", 2);
+```
+
+其中 d1001 是表名,meters 是超级表的表名,后面紧跟标签 Location 的具体标签值为 "California.SanFrancisco",标签 groupId 的具体标签值为 2。虽然在创建表时,需要指定标签值,但可以事后修改。详细细则请见 [TDengine SQL 的表管理](/taos-sql/table) 章节。
+
+在 TDengine 系统里面,一个测点通过超级表创建一张表。在部分的 TDengine 文档里面,通过超级表创建的表叫做子表。所有可以在普通表里面使用的 SQL 命令都可以应用到子表里面。
+
+:::warning
+我们不推荐使用另外一个数据库里面的超级表作为模板来创建这个数据库的子表。
+:::
+
+:::tip
+TDengine 建议将数据采集点的全局唯一 ID 作为表名(比如设备序列号)。但对于有的场景,并没有唯一的 ID,可以将多个 ID 组合成一个唯一的 ID。不建议将具有唯一性的 ID 作为标签值。
+:::
+
+### 自动建表
+
+在某些特殊场景中,用户在写数据时并不确定某个数据采集点的表是否存在,此时可在写入数据时使用自动建表语法来创建不存在的表,若该表已存在则不会建立新表且后面的 USING 语句被忽略。比如:
+
+```sql
+INSERT INTO power.d1001 USING meters TAGS ("California.SanFrancisco", 2) VALUES (NOW, 10.2, 219, 0.32);
+```
+
+上述 SQL 语句将记录 `(NOW, 10.2, 219, 0.32)` 插入表 d1001。如果表 d1001 还未创建,则使用超级表 meters 做模板自动创建,同时打上标签值 `"California.SanFrancisco", 2`。
+
+关于自动建表的详细语法请参见 [插入记录时自动建表](/taos-sql/insert#插入记录时自动建表) 章节。
+
+## 多列模型 vs 单列模型
+
+TDengine 支持多列模型,只要物理量是一个数据采集点同时采集的(时间戳一致),这些量就可以作为不同列放在一张超级表里。但还有一种极限的设计,单列模型,每个采集的物理量都单独建表,因此每种类型的物理量都单独建立一张超级表。比如电流、电压、相位,就建三张超级表。
+
+TDengine 建议尽可能采用多列模型,因为插入效率以及存储效率更高。但对于有些场景,一个采集点的采集量的种类经常变化,这个时候,如果采用多列模型,就需要频繁修改超级表的结构定义,让应用变得复杂,这个时候,采用单列模型会显得更简单。
diff --git a/docs/zh/10-programming/03-insert.md b/docs/zh/10-programming/03-insert.md
new file mode 100644
index 0000000000000000000000000000000000000000..753dc82fdb1f9a3403f8044811e8785d351fc2fb
--- /dev/null
+++ b/docs/zh/10-programming/03-insert.md
@@ -0,0 +1,106 @@
+---
+sidebar_label: 写入
+title: SQL 写入数据
+description: 使用 TDengine SQL 写入数据的开发指南
+---
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+## SQL 样例
+
+这里有一些 `INSERT` 语句的基本样例。用户可以通过 TDengine CLI、TDengine Cloud 的数据浏览器,或者在使用 TDengine 连接器开发的应用中执行这些语句。
+
+### 一次写入一条
+
+下面这条 INSERT 就将一条记录写入到表 d1001 中:
+
+```sql
+INSERT INTO test.d1001 VALUES (1538548685000, 10.3, 219, 0.31);
+```
+
+### 一次写入多条
+
+TDengine 支持一次写入多条记录,比如下面这条命令就将两条记录写入到表 d1001 中:
+
+```sql
+INSERT INTO test.d1001 VALUES (1538548684000, 10.2, 220, 0.23) (1538548696650, 10.3, 218, 0.25);
+```
+
+### 一次写入多表
+
+TDengine 也支持一次向多个表写入数据,比如下面这条命令就向 d1001 写入两条记录,向 d1002 写入一条记录:
+
+```sql
+INSERT INTO test.d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, 218, 0.33) test.d1002 VALUES (1538548696800, 12.3, 221, 0.31);
+```
+
+详细的 SQL INSERT 语法规则参考 [TDengine SQL 的数据写入](https://docs.taosdata.com/cloud/taos-sql/insert)。
+
+## 连接器样例
+
+:::note
+在执行下面样例代码之前,您必须首先建立和 TDengine Cloud 的连接,请参考[连接云服务](../../programming/connect/)。
+
+:::
+
+
+
+这个例子中,我们使用 `execute` 方法来执行 SQL 并得到受影响的行数。参数 `conn` 是类 `taosrest.TaosRestConnection` 的一个实例,请参考[连接教程](../../programming/connect/python#connect)。
+
+```python
+{{#include docs/examples/python/develop_tutorial.py:insert}}
+```
+
+
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/CloudTutorial.java:insert}}
+```
+
+
+
+
+```go
+{{#include docs/examples/go/tutorial/main.go:insert}}
+```
+
+
+
+
+在这个例子中,我们使用 `exec` 方法来执行 SQL。`exec` 是为非查询的 SQL 语句设计的,所有返回的数据都会被忽略。
+
+```rust
+{{#include docs/examples/rust/cloud-example/examples/tutorial.rs:insert}}
+```
+
+
+
+
+```javascript
+{{#include docs/examples/node/insert.js}}
+```
+
+
+
+
+
+``` XML
+{{#include docs/examples/csharp/cloud-example/inout/inout.csproj}}
+```
+
+```csharp
+{{#include docs/examples/csharp/cloud-example/inout/Program.cs:insert}}
+```
+
+
+
+
+
+:::note
+
+由于 RESTful 接口无状态,不能使用 `USE db;` 语句来切换数据库,所以在上面示例中使用了 `dbName.tbName` 指定表名。
+
+:::
+
+# 
\ No newline at end of file
diff --git a/docs/zh/10-programming/04-query.md b/docs/zh/10-programming/04-query.md
new file mode 100644
index 0000000000000000000000000000000000000000..be0ae7322565ec2f0a041e22a36a2642e28daf5e
--- /dev/null
+++ b/docs/zh/10-programming/04-query.md
@@ -0,0 +1,261 @@
+---
+sidebar_label: 查询数据
+title: 查询数据
+description: "主要查询功能,通过连接器执行同步查询和异步查询"
+---
+
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
+
+## 主要查询功能
+
+TDengine 采用 SQL 作为查询语言。应用程序可以通过 REST API 或连接器发送 SQL 语句,用户还可以通过 TDengine 命令行工具 taos 手动执行 SQL 即席查询(Ad-Hoc Query)。TDengine 支持如下查询功能:
+
+- 单列、多列数据查询
+- 标签和数值的多种过滤条件:>, <, =, <\>, like 等
+- 聚合结果的分组(Group by)、排序(Order by)、约束输出(Limit/Offset)
+- 时间窗口(Interval)、会话窗口(Session)和状态窗口(State_window)等窗口切分聚合查询
+- 数值列及聚合结果的四则运算
+- 时间戳对齐的连接查询(Join Query: 隐式连接)操作
+- 多种聚合/计算函数: count, max, min, avg, sum, twa, stddev, leastsquares, top, bottom, first, last, percentile, apercentile, last_row, spread, diff 等
+
+例如:在命令行工具 taos 中,从表 d1001 中查询出 voltage > 215 的记录,按时间降序排列,仅仅输出 2 条。
+
+```sql title="SQL"
+select * from test.d1001 where voltage > 215 order by ts desc limit 2;
+```
+
+```txt title="output"
+           ts            |       current        |   voltage   |        phase         |
+======================================================================================
+ 2018-10-03 14:38:16.800 |             12.30000 |         221 |              0.31000 |
+ 2018-10-03 14:38:15.000 |             12.60000 |         218 |              0.33000 |
+Query OK, 2 row(s) in set (0.001100s)
+```
+
+为满足物联网场景的需求,TDengine 支持几个特殊的函数,比如 twa(时间加权平均),spread (最大值与最小值的差),last_row(最后一条记录)等,更多与物联网场景相关的函数将添加进来。
+
+具体的查询语法请看 [TDengine SQL 
的数据查询](https://docs.taosdata.com/cloud/taos-sql/select) 章节。 + +## 多表聚合查询 + +物联网场景中,往往同一个类型的数据采集点有多个。TDengine 采用超级表(STable)的概念来描述某一个类型的数据采集点,一张普通的表来描述一个具体的数据采集点。同时 TDengine 使用标签来描述数据采集点的静态属性,一个具体的数据采集点有具体的标签值。通过指定标签的过滤条件,TDengine 提供了一高效的方法将超级表(某一类型的数据采集点)所属的子表进行聚合查询。对普通表的聚合函数以及绝大部分操作都适用于超级表,语法完全一样。 + +### 示例一 + +在 [TDengine CLI](../../tools/cli) ,查找加利福尼亚州所有智能电表采集的电压平均值,并按照 location 分组。 + +```sql title="SQL" +SELECT AVG(voltage), location FROM test.meters GROUP BY location; +``` + +```txt title="output" + location | avg(voltage) | +======================================================= + California.PaloAlto | 109.507000000 | + California.Sunnyvale | 109.507000000 | + California.MountainView | 109.507000000 | + California.SanFrancisco | 109.507000000 | + California.SanJose | 109.507000000 | + California.SanDiego | 109.507000000 | + California.SantaClara | 109.507000000 | + California.Cupertino | 109.507000000 | + California.Campbell | 109.507000000 | + California.LosAngles | 109.507000000 | +Query OK, 10 row(s) in set +``` + +### 示例二 + +在 TDengine CLI `taos`, 查找 groupId 为 2 的所有智能电表过去24小时的记录条数和电流的最大值。 + +```sql title="SQL" +SELECT count(*), max(current) FROM test.meters where groupId = 2 and ts > now - 24h; +``` + +```txt title="output" + count(*) | max(current) | +================================== + 5 | 13.4 | +Query OK, 1 row(s) in set (0.002136s) +``` + +在 [TDengine SQL 的数据查询](https://docs.taosdata.com/cloud/taos-sql/select) 一章,查询类操作都会注明是否支持超级表。 + +## 降采样查询、插值 + +物联网场景里,经常需要通过降采样(down sampling)将采集的数据按时间段进行聚合。TDengine 提供了一个简便的关键词 interval 让按照时间窗口的查询操作变得极为简单。比如,将智能电表 d1001 采集的电流值每 10 秒钟求和 + +```sql title="SQL" +taos> SELECT _wstart, sum(current) FROM test.d1001 INTERVAL(10s); +``` + +```txt title="output" + _wstart | sum(current) | +====================================================== + 2018-10-03 14:38:00.000 | 10.300000191 | + 2018-10-03 14:38:10.000 | 24.900000572 | +Query OK, 2 rows in database (0.003139s) +``` + +降采样操作也适用于超级表,比如:将加利福尼亚州所有智能电表采集的电流值每秒钟求和 + +```sql title="SQL" +SELECT _wstart, SUM(current) FROM test.meters where location like "California%" INTERVAL(1s); +``` + +```txt title="output" + _wstart | sum(current) | +====================================================== + 2018-10-03 14:38:04.000 | 10.199999809 | + 2018-10-03 14:38:05.000 | 23.699999809 | + 2018-10-03 14:38:06.000 | 11.500000000 | + 2018-10-03 14:38:15.000 | 12.600000381 | + 2018-10-03 14:38:16.000 | 34.400000572 | +Query OK, 5 rows in database (0.007413s) +``` + +降采样操作也支持时间偏移,比如:将所有智能电表采集的电流值每秒钟求和,但要求每个时间窗口从 500 毫秒开始 + +```sql title="SQL" +SELECT _wstart, SUM(current) FROM test.meters INTERVAL(1s, 500a); +``` + +```txt title="output" + _wstart | sum(current) | +====================================================== + 2018-10-03 14:38:03.500 | 10.199999809 | + 2018-10-03 14:38:04.500 | 10.300000191 | + 2018-10-03 14:38:05.500 | 13.399999619 | + 2018-10-03 14:38:06.500 | 11.500000000 | + 2018-10-03 14:38:14.500 | 12.600000381 | + 2018-10-03 14:38:16.500 | 34.400000572 | +Query OK, 6 rows in database (0.005515s) +``` + +物联网场景里,每个数据采集点采集数据的时间是难同步的,但很多分析算法(比如 FFT)需要把采集的数据严格按照时间等间隔的对齐,在很多系统里,需要应用自己写程序来处理,但使用 TDengine 的降采样操作就轻松解决。 + +如果一个时间间隔里,没有采集的数据,TDengine 还提供插值计算的功能。 + +语法规则细节请见 [TDengine SQL 的按时间窗口切分聚合](https://docs.taosdata.com/cloud/taos-sql/interval) 章节。 + +## 连接器样例 + +:::note +在执行下面样例代码的之前,您必须首先建立和 TDengine Cloud 的连接,请参考 [连接 云服务](../../programming/connect/). 
+ +::: + + + + +在这个例子里面,我们使用 `query` 方法去执行 SQL,然后获取 `result` 对象。 + +```python +{{#include docs/examples/python/develop_tutorial.py:query:nrc}} +``` + +从 `result` 对象里面获取列的元数据,包括列名,列类型和列的长度。 + +```python +{{#include docs/examples/python/develop_tutorial.py:fields:nrc}} +``` + +从 `result` 获得总行数: + +```python +{{#include docs/examples/python/develop_tutorial.py:rows:nrc}} +``` + +在每一行上面迭代: + +```python +{{#include docs/examples/python/develop_tutorial.py:iter}} +``` + + + + +在这个例子中,我们使用 `Statement` 对象的 `executeQuery` 方法并获取 `ResultSet` 对象。 + +```java +{{#include docs/examples/java/src/main/java/com/taos/example/CloudTutorial.java:query:nrc}} +``` + +从结果里面得到列的元数据: + +```java +{{#include docs/examples/java/src/main/java/com/taos/example/CloudTutorial.java:meta:nrc}} +``` + +在结果上面迭代打印每一行数据: +```java +{{#include docs/examples/java/src/main/java/com/taos/example/CloudTutorial.java:iter}} +``` + + + + +在这个例子中,我们使用 `Query` 方法执行 SQL 并获取了一个 `sql.Rows` 对象。 + +```go +{{#include docs/examples/go/tutorial/main.go:query:nrc}} +``` + +从结果行里面获取列名: + +```go +{{#include docs/examples/go/tutorial/main.go:meta:nrc}} +``` + +在每一行上面迭代并打印每一行数据: + +```go +{{#include docs/examples/go/tutorial/main.go:iter}} +``` + + + + +在这个例子里面,我们使用查询方法来执行 SQL ,然后获取到 result 对象。 + +```rust +{{#include docs/examples/rust/cloud-example/examples/tutorial.rs:query:nrc}} +``` + +从结果里面获取列的元数据: + +```rust +{{#include docs/examples/rust/cloud-example/examples/tutorial.rs:meta:nrc}} +``` + +获取所以的行数据并打印每一行数据: + +```rust +{{#include docs/examples/rust/cloud-example/examples/tutorial.rs:iter}} +``` + + + + +```javascript +{{#include docs/examples/node/query.js}} +``` + + + + + +在这个例子里面,我们使用查询方法来执行 SQL ,然后获取到 result 对象。 + +``` XML +{{#include docs/examples/csharp/cloud-example/inout/inout.csproj}} +``` + +```C# +{{#include docs/examples/csharp/cloud-example/inout/Program.cs:query}} +``` + + + + \ No newline at end of file diff --git a/docs/zh/10-programming/06-connector/01-python.md b/docs/zh/10-programming/06-connector/01-python.md new file mode 100644 index 0000000000000000000000000000000000000000..e5b2399817f75fc33d92dfcb4ee3d352a1338d2f --- /dev/null +++ b/docs/zh/10-programming/06-connector/01-python.md @@ -0,0 +1,109 @@ +--- +sidebar_label: Python +title: TDengine Python 连接器 +description: "taospy 是 TDengine 的官方 Python 连接器。taospy 提供了丰富的 API, 使得 Python 应用可以很方便地使用 TDengine。tasopy 对 TDengine 的原生接口和 REST 接口都进行了封装, 分别对应 tasopy 的两个子模块:taos 和 taosrest。除了对原生接口和 REST 接口的封装,taospy 还提供了符合 Python 数据访问规范(PEP 249)的编程接口。这使得 taospy 和很多第三方工具集成变得简单,比如 SQLAlchemy 和 pandas" +--- + +`taospy` 是 TDengine 的官方 Python 连接器。`taospy` 提供了丰富的 API, 使得 Python 应用可以很方便地使用 TDengine。`taospy` 对 TDengine 的[原生接口](../cpp)和 [REST 接口](../rest-api)都进行了封装, 分别对应 `taospy` 包的 `taos` 模块 和 `taosrest` 模块。 +除了对原生接口和 REST 接口的封装,`taospy` 还提供了符合 [Python 数据访问规范(PEP 249)](https://peps.python.org/pep-0249/) 的编程接口。这使得 `taospy` 和很多第三方工具集成变得简单,比如 [SQLAlchemy](https://www.sqlalchemy.org/) 和 [pandas](https://pandas.pydata.org/)。 + +Python 连接器的源码托管在 [GitHub](https://github.com/taosdata/taos-connector-python)。 + +## 安装 + +### 准备 + +1. 安装 Python。新近版本 taospy 包要求 Python 3.6+。早期版本 taospy 包要求 Python 3.7+。taos-ws-py 包要求 Python 3.7+。如果系统上还没有 Python 可参考 [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download) 安装。 +2. 
安装 [pip](https://pypi.org/project/pip/)。大部分情况下 Python 的安装包都自带了 pip 工具, 如果没有请参考 [pip documentation](https://pip.pypa.io/en/stable/installation/) 安装。 + +### 使用 pip 安装 + +```bash +pip3 install -U taospy[ws] +``` + +### 使用 conda 安装 + +```bash +conda install -c conda-forge taospy taospyws +``` + +### 安装验证 + +只需验证是否能成功导入 `taosrest` 模块。可在 Python 交互式 Shell 中输入: + +```python +import taosrest +``` + +## 建立连接 + +```python +{{#include docs/examples/python/reference_connection.py:connect}} +``` + +`connect()` 函数的所有参数都是可选的关键字参数。下面是连接参数的具体说明: + +- `url`: TDengine Cloud 的URL。 +- `token`: TDengine Cloud 的令牌. +- `timeout`: HTTP 请求超时时间。单位为秒。默认为 `socket._GLOBAL_DEFAULT_TIMEOUT`。 一般无需配置。 + +## 示例程序 + +### 使用 TaosRestConnection 类 + +```python +{{#include docs/examples/python/reference_connection.py:example}} +``` + +### 使用 TaosRestCursor 类 + +`TaosRestCursor` 类是对 PEP249 Cursor 接口的实现。 + +```python +{{#include docs/examples/python/reference_cursor.py:basic}} +``` + +- `cursor.execute` : 用来执行任意 SQL 语句。 +- `cursor.rowcount`: 对于写入操作返回写入成功记录数。对于查询操作,返回结果集行数。 +- `cursor.description` : 返回字段的描述信息。关于描述信息的具体格式请参考[TaosRestCursor](https://docs.taosdata.com/api/taospy/taosrest/cursor.html)。 + +##### RestClient 类的使用 + +`RestClient` 类是对于 [REST API](../rest-api) 的直接封装。它只包含一个 `sql()` 方法用于执行任意 SQL 语句, 并返回执行结果。 + +```python +{{#include docs/examples/python/reference_rest_client.py}} +``` + +对于 `sql()` 方法更详细的介绍, 请参考 [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html)。 + +## 其它说明 + +### 异常处理 + +所有数据库操作如果出现异常,都会直接抛出来。由应用程序负责异常处理。比如: + +```python +{{#include docs/examples/python/handle_exception.py}} +``` + +### 关于纳秒 (nanosecond) + +由于目前 Python 对 nanosecond 支持的不完善(见下面的链接),目前的实现方式是在 nanosecond 精度时返回整数,而不是 ms 和 us 返回的 datetime 类型,应用开发者需要自行处理,建议使用 pandas 的 to_datetime()。未来如果 Python 正式完整支持了纳秒,Python 连接器可能会修改相关接口。 + +1. https://stackoverflow.com/questions/10611328/parsing-datetime-strings-containing-nanoseconds +2. 
https://www.python.org/dev/peps/pep-0564/ + +## 重要更新 + +[**Release Notes**](https://github.com/taosdata/taos-connector-python/releases) + +## API 参考 + +- [taos](https://docs.taosdata.com/api/taospy/taos/) +- [taosrest](https://docs.taosdata.com/api/taospy/taosrest) + +## 常见问题 + +欢迎[提问或报告问题](https://github.com/taosdata/taos-connector-python/issues)。 diff --git a/docs/zh/10-programming/06-connector/02-java.md b/docs/zh/10-programming/06-connector/02-java.md new file mode 100644 index 0000000000000000000000000000000000000000..9db93699a6d323b6fbc6a507c9d22d0a3962a698 --- /dev/null +++ b/docs/zh/10-programming/06-connector/02-java.md @@ -0,0 +1,347 @@ +--- +toc_max_heading_level: 4 +sidebar_label: Java +title: TDengine Java Connector +description: TDengine Java 连接器基于标准 JDBC API 实现, 并提供原生连接与 REST连接两种连接器。 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +`taos-jdbcdriver` 是 TDengine 的官方 Java 语言连接器,Java 开发人员可以通过它开发存取 TDengine 数据库的应用软件。`taos-jdbcdriver` 实现了 JDBC driver 标准的接口,通过 REST 接口连接 TDengine 实例。 + +## TDengine DataType 和 Java DataType + +TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对应类型转换如下: + +| TDengine DataType | JDBCType | +| ----------------- | ---------------------------------- | +| TIMESTAMP | java.sql.Timestamp | +| INT | java.lang.Integer | +| BIGINT | java.lang.Long | +| FLOAT | java.lang.Float | +| DOUBLE | java.lang.Double | +| SMALLINT | java.lang.Short | +| TINYINT | java.lang.Byte | +| BOOL | java.lang.Boolean | +| BINARY | byte array | +| NCHAR | java.lang.String | +| JSON | java.lang.String | + +**注意**:JSON 类型仅在 tag 中支持。 + +## 安装步骤 + +### 安装前准备 + +使用 Java Connector 连接数据库前,需要具备以下条件: + +- 已安装 Java 1.8 或以上版本运行时环境和 Maven 3.6 或以上版本 +- 已安装 TDengine 客户端驱动(使用原生连接必须安装,使用 REST 连接无需安装),具体步骤请参考[安装客户端驱动](../#安装客户端驱动) + +### 安装连接器 + + + + +目前 taos-jdbcdriver 已经发布到 [Sonatype Repository](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver) 仓库,且各大仓库都已同步。 + +- [sonatype](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver) +- [mvnrepository](https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver) +- [maven.aliyun](https://maven.aliyun.com/mvn/search) + +Maven 项目中,在 pom.xml 中添加以下依赖: + +```xml-dtd + + com.taosdata.jdbc + taos-jdbcdriver + 3.0.0 + +``` + + + + +可以通过下载 TDengine 的源码,自己编译最新版本的 Java connector + +```shell +git clone https://github.com/taosdata/taos-connector-jdbc.git +cd taos-connector-jdbc +mvn clean install -Dmaven.test.skip=true +``` + +编译后,在 target 目录下会产生 taos-jdbcdriver-3.0.*-dist.jar 的 jar 包,并自动将编译的 jar 文件放在本地的 Maven 仓库中。 + + + + +## 建立连接 + +TDengine 的 JDBC URL 规范格式为: +`jdbc:TAOS-RS://[host_name]:[port]/[database_name]?batchfetch={true|false}&useSSL={true|false}&token={token}&httpPoolSize={httpPoolSize}&httpKeepAlive={true|false}]&httpConnectTimeout={httpTimeout}&httpSocketTimeout={socketTimeout}` + +```java +Class.forName("com.taosdata.jdbc.rs.RestfulDriver"); +String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata"; +Connection conn = DriverManager.getConnection(jdbcUrl); +``` + +:::note + +- REST 接口是无状态的。在使用 JDBC REST 连接时,需要在 SQL 中指定表、超级表的数据库名称。例如: + +```sql +INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('California.SanFrancisco') VALUES(now, 24.6); +``` + +- 如果在 URL 中指定了 dbname,那么 JDBC REST 连接会默认使用 `/rest/sql/dbname` 作为 restful 请求的 URL,在 SQL 中不需要指定 dbname。例如:URL 为 jdbc:TAOS-RS://127.0.0.1:6041/test,那么,可以执行 SQL: + + ```sql + insert into t1 using weather(ts, temperature) tags('California.SanFrancisco') values(now, 24.6); + ``` + +::: + +### 指定 URL 和 Properties 获取连接 + 
+除了通过指定的 URL 获取连接,还可以使用 Properties 指定建立连接时的参数。 + +**注意**: + +- 应用中设置的 client parameter 为进程级别的,即如果要更新 client 的参数,需要重启应用。这是因为 client parameter 是全局参数,仅在应用程序的第一次设置生效。 +- 以下示例代码基于 taos-jdbcdriver-3.0.0。 + +```java +public Connection getRestConn() throws Exception{ + Class.forName("com.taosdata.jdbc.rs.RestfulDriver"); + String jdbcUrl = System.getenv("TDENGINE_JDBC_URL"); + Properties connProps = new Properties(); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_BATCH_LOAD, "true"); + Connection conn = DriverManager.getConnection(jdbcUrl, connProps); + return conn; +} +``` + +### 配置参数的优先级 + +通过前面三种方式获取连接,如果配置参数在 url、Properties、客户端配置文件中有重复,则参数的`优先级由高到低`分别如下: + +1. JDBC URL 参数,如上所述,可以在 JDBC URL 的参数中指定。 +2. Properties connProps + +## 使用示例 + +### 创建数据库和表 + +```java +Statement stmt = conn.createStatement(); + +// create database +stmt.executeUpdate("create database if not exists db"); + +// use database +stmt.executeUpdate("use db"); + +// create table +stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int, humidity float)"); +``` + +### 插入数据 + +```java +// insert data +int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now + 1s, 20, 9.3)"); + +System.out.println("insert " + affectedRows + " rows."); +``` + +> `now` 为系统内部函数,默认为客户端所在计算机当前时间。 +> `now + 1s` 代表客户端当前时间往后加 1 秒,数字后面代表时间单位:a(毫秒),s(秒),m(分),h(小时),d(天),w(周),n(月),y(年)。 + +### 查询数据 + +```java +// query data +ResultSet resultSet = stmt.executeQuery("select * from tb"); + +Timestamp ts = null; +int temperature = 0; +float humidity = 0; +while(resultSet.next()){ + + ts = resultSet.getTimestamp(1); + temperature = resultSet.getInt(2); + humidity = resultSet.getFloat("humidity"); + + System.out.printf("%s, %d, %s\n", ts, temperature, humidity); +} +``` + +> 查询和操作关系型数据库一致,使用下标获取返回字段内容时从 1 开始,建议使用字段名称获取。 + +### 处理异常 + +在报错后,通过 SQLException 可以获取到错误的信息和错误码: + +```java +try (Statement statement = connection.createStatement()) { + // executeQuery + ResultSet resultSet = statement.executeQuery(sql); + // print result + printResult(resultSet); +} catch (SQLException e) { + System.out.println("ERROR Message: " + e.getMessage()); + System.out.println("ERROR Code: " + e.getErrorCode()); + e.printStackTrace(); +} +``` + +JDBC 连接器可能报错的错误码包括 3 种:JDBC driver 本身的报错(错误码在 0x2301 到 0x2350 之间),原生连接方法的报错(错误码在 0x2351 到 0x2400 之间),TDengine 其他功能模块的报错。 + +具体的错误码请参考: + +- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java) + +### 关闭资源 + +```java +resultSet.close(); +stmt.close(); +conn.close(); +``` + +:::note + 请确保关闭连接,否则会造成连接池泄露。 +::: + +### 与连接池使用 + +#### HikariCP + +使用示例如下: + +```java + public static void main(String[] args) throws SQLException { + HikariConfig config = new HikariConfig(); + // jdbc properties + config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log"); + config.setUsername("root"); + config.setPassword("taosdata"); + // connection pool configurations + config.setMinimumIdle(10); //minimum number of idle connection + config.setMaximumPoolSize(10); //maximum number of connection in the pool + config.setConnectionTimeout(30000); //maximum wait milliseconds for get connection from pool + config.setMaxLifetime(0); // maximum life time for each connection + config.setIdleTimeout(0); // max idle time for recycle idle connection + config.setConnectionTestQuery("select server_status()"); //validation query + + HikariDataSource ds = new HikariDataSource(config); //create datasource + + Connection connection = ds.getConnection(); // 
get connection + Statement statement = connection.createStatement(); // get statement + + //query or insert + // ... + + connection.close(); // put back to connection pool +} +``` + +> 通过 HikariDataSource.getConnection() 获取连接后,使用完成后需要调用 close() 方法,实际上它并不会关闭连接,只是放回连接池中。 +> 更多 HikariCP 使用问题请查看[官方说明](https://github.com/brettwooldridge/HikariCP)。 + +#### Druid + +使用示例如下: + +```java +public static void main(String[] args) throws Exception { + + DruidDataSource dataSource = new DruidDataSource(); + // jdbc properties + dataSource.setDriverClassName("com.taosdata.jdbc.TSDBDriver"); + dataSource.setUrl(url); + dataSource.setUsername("root"); + dataSource.setPassword("taosdata"); + // pool configurations + dataSource.setInitialSize(10); + dataSource.setMinIdle(10); + dataSource.setMaxActive(10); + dataSource.setMaxWait(30000); + dataSource.setValidationQuery("select server_status()"); + + Connection connection = dataSource.getConnection(); // get connection + Statement statement = connection.createStatement(); // get statement + //query or insert + // ... + + connection.close(); // put back to connection pool +} +``` + +> 更多 druid 使用问题请查看[官方说明](https://github.com/alibaba/druid)。 + +### 更多示例程序 + +示例程序源码位于 `TDengine/examples/JDBC` 下: + +- JDBCDemo:JDBC 示例源程序。 +- JDBCConnectorChecker:JDBC 安装校验源程序及 jar 包。 +- connectionPools:HikariCP, Druid, dbcp, c3p0 等连接池中使用 taos-jdbcdriver。 +- SpringJdbcTemplate:Spring JdbcTemplate 中使用 taos-jdbcdriver。 +- mybatisplus-demo:Springboot + Mybatis 中使用 taos-jdbcdriver。 + +请参考:[JDBC example](https://github.com/taosdata/TDengine/tree/3.0/examples/JDBC) + +## 最近更新记录 + +| taos-jdbcdriver 版本 | 主要变化 | +| :------------------: | :----------------------------: | +| 3.0.3 | 修复 REST 连接在 jdk17+ 版本时间戳解析错误问题 | +| 3.0.1 - 3.0.2 | 修复一些情况下结果集数据解析错误的问题。3.0.1 在 JDK 11 环境编译,JDK 8 环境下建议使用 3.0.2 版本 | +| 3.0.0 | 支持 TDengine 3.0 | +| 2.0.42 | 修在 WebSocket 连接中 wasNull 接口返回值 | +| 2.0.41 | 修正 REST 连接中用户名和密码转码方式 | +| 2.0.39 - 2.0.40 | 增加 REST 连接/请求 超时设置 | +| 2.0.38 | JDBC REST 连接增加批量拉取功能 | +| 2.0.37 | 增加对 json tag 支持 | +| 2.0.36 | 增加对 schemaless 写入支持 | + +## 常见问题 + +1. 使用 Statement 的 `addBatch()` 和 `executeBatch()` 来执行“批量写入/更新”,为什么没有带来性能上的提升? + +**原因**:TDengine 的 JDBC 实现中,通过 `addBatch` 方法提交的 SQL 语句,会按照添加的顺序,依次执行,这种方式没有减少与服务端的交互次数,不会带来性能上的提升。 + +**解决方法**:1. 在一条 insert 语句中拼接多个 values 值;2. 使用多线程的方式并发插入;3. 使用参数绑定的写入方式 + +2. java.lang.UnsatisfiedLinkError: no taos in java.library.path + +**原因**:程序没有找到依赖的本地函数库 taos。 + +**解决方法**:Windows 下可以将 C:\TDengine\driver\taos.dll 拷贝到 C:\Windows\System32\ 目录下,Linux 下将建立如下软链 `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` 即可,macOS 下需要建立软链 `ln -s /usr/local/lib/libtaos.dylib`。 + +3. java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform + +**原因**:目前 TDengine 只支持 64 位 JDK。 + +**解决方法**:重新安装 64 位 JDK。 + +4. java.lang.NoSuchMethodError: setByteArray + +**原因**:taos-jdbcdriver 3.* 版本仅支持 TDengine 3.0 及以上版本。 + +**解决方法**: 使用 taos-jdbcdriver 2.* 版本连接 TDengine 2.* 版本。 + +5. java.lang.NoSuchMethodError: java.nio.ByteBuffer.position(I)Ljava/nio/ByteBuffer; ... 
taos-jdbcdriver-3.0.1.jar + +**原因**:taos-jdbcdriver 3.0.1 版本需要在 JDK 11+ 环境使用。 + +**解决方法**: 更换 taos-jdbcdriver 3.0.2+ 版本。 + +其它问题请参考 [FAQ](../../../train-faq/faq) + +## API 参考 + +[taos-jdbcdriver doc](https://docs.taosdata.com/api/taos-jdbcdriver) diff --git a/docs/zh/10-programming/06-connector/03-go.md b/docs/zh/10-programming/06-connector/03-go.md new file mode 100644 index 0000000000000000000000000000000000000000..ff556b0c127690dc35c2d493f873817a4ef0b88c --- /dev/null +++ b/docs/zh/10-programming/06-connector/03-go.md @@ -0,0 +1,200 @@ +--- +toc_max_heading_level: 4 +sidebar_position: 4 +sidebar_label: Go +title: TDengine Go Connector +--- + +`driver-go` 是 TDengine 的官方 Go 语言连接器,实现了 Go 语言 [database/sql](https://golang.org/pkg/database/sql/) 包的接口。Go 开发人员可以通过它开发存取 TDengine 集群数据的应用软件。 + +本文介绍如何安装 `driver-go`,并通过 `driver-go` 连接 TDengine Cloud 集群、进行数据查询、数据写入等基本操作。 + +`driver-go` 的源码托管在 [GitHub](https://github.com/taosdata/driver-go)。 + +## 版本支持 + +请参考[版本支持列表](../#版本支持) + +## 安装步骤 + +### 安装前准备 + +* 安装 Go 开发环境(Go 1.14 及以上,GCC 4.8.5 及以上) +* 如果使用原生连接器,请安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](../#安装客户端驱动) + +配置好环境变量,检查命令: + +* ```go env``` +* ```gcc -v``` + +### 使用 go get 安装 + +`go get -u github.com/taosdata/driver-go/v3@latest` + +### 使用 go mod 管理 + +1. 使用 `go mod` 命令初始化项目: + + ```text + go mod init taos-demo + ``` + +2. 引入 taosSql : + + ```go + import ( + "database/sql" + _ "github.com/taosdata/driver-go/v3/taosSql" + ) + ``` + +3. 使用 `go mod tidy` 更新依赖包: + + ```text + go mod tidy + ``` + +4. 使用 `go run taos-demo` 运行程序或使用 `go build` 命令编译出二进制文件。 + + ```text + go run taos-demo + go build + ``` + +## 建立连接 + +### 数据源名称(DSN) + +数据源名称具有通用格式,例如 [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php),但没有类型前缀(方括号表示可选): + +``` text +[username[:password]@][protocol[(address)]]/[dbname][?param1=value1&...¶mN=valueN] +``` + +完整形式的 DSN: + +```text +username:password@protocol(address)/dbname?param=value +``` + +### 使用连接器进行连接 + +_taosRestful_ 通过 `http client` 实现了 Go 的 `database/sql/driver` 接口。只需要引入驱动就可以使用[`database/sql`](https://golang.org/pkg/database/sql/)的接口。 + +使用 `taosRestful` 作为 `driverName` 并且使用一个正确的 [DSN](#DSN) 作为 `dataSourceName`,DSN 支持的参数: + +* `disableCompression` 是否接受压缩数据,默认为 true 不接受压缩数据,如果传输数据使用 gzip 压缩设置为 false。 +* `readBufferSize` 读取数据的缓存区大小默认为 4K(4096),当查询结果数据量多时可以适当调大该值。 + +## 使用示例 + +### 更多示例程序 + +* [示例程序](https://github.com/taosdata/driver-go/tree/3.0/examples) +* [视频教程](https://www.taosdata.com/blog/2020/11/11/1951.html)。 + +## 使用限制 + +由于 REST 接口无状态所以 `use db` 语法不会生效,需要将 db 名称放到 SQL 语句中,如:`create table if not exists tb1 (ts timestamp, a int)`改为`create table if not exists test.tb1 (ts timestamp, a int)`否则将报错`[0x217] Database not specified or available`。 + +也可以将 db 名称放到 DSN 中,将 `root:taosdata@http(localhost:6041)/` 改为 `root:taosdata@http(localhost:6041)/test`。当指定的 db 不存在时执行 `create database` 语句不会报错,而执行针对该 db 的其他查询或写入操作会报错。 + +完整示例如下: + +```go +package main + +import ( + "database/sql" + "fmt" + "time" + + _ "github.com/taosdata/driver-go/v3/taosRestful" +) + +func main() { + var taosDSN = "root:taosdata@http(localhost:6041)/test" + taos, err := sql.Open("taosRestful", taosDSN) + if err != nil { + fmt.Println("failed to connect TDengine, err:", err) + return + } + defer taos.Close() + taos.Exec("create database if not exists test") + taos.Exec("create table if not exists tb1 (ts timestamp, a int)") + _, err = taos.Exec("insert into tb1 values(now, 0)(now+1s,1)(now+2s,2)(now+3s,3)") + if err != nil { + fmt.Println("failed to insert, err:", err) + return + } + rows, err := 
taos.Query("select * from tb1") + if err != nil { + fmt.Println("failed to select from table, err:", err) + return + } + + defer rows.Close() + for rows.Next() { + var r struct { + ts time.Time + a int + } + err := rows.Scan(&r.ts, &r.a) + if err != nil { + fmt.Println("scan error:\n", err) + return + } + fmt.Println(r.ts, r.a) + } +} +``` + +## 常见问题 + +1. database/sql 中 stmt(参数绑定)相关接口崩溃 + + REST 不支持参数绑定相关接口,建议使用`db.Exec`和`db.Query`。 + +2. 使用 `use db` 语句后执行其他语句报错 `[0x217] Database not specified or available` + + 在 REST 接口中 SQL 语句的执行无上下文关联,使用 `use db` 语句不会生效,解决办法见上方使用限制章节。 + +3. 使用 taosSql 不报错使用 taosRestful 报错 `[0x217] Database not specified or available` + + 因为 REST 接口无状态,使用 `use db` 语句不会生效,解决办法见上方使用限制章节。 + +4. `readBufferSize` 参数调大后无明显效果 + + `readBufferSize` 调大后会减少获取结果时 `syscall` 的调用。如果查询结果的数据量不大,修改该参数不会带来明显提升,如果该参数修改过大,瓶颈会在解析 JSON 数据。如果需要优化查询速度,需要根据实际情况调整该值来达到查询效果最优。 + +5. `disableCompression` 参数设置为 `false` 时查询效率降低 + + 当 `disableCompression` 参数设置为 `false` 时查询结果会使用 `gzip` 压缩后传输,拿到数据后要先进行 `gzip` 解压。 + +6. `go get` 命令无法获取包,或者获取包超时 + + 设置 Go 代理 `go env -w GOPROXY=https://goproxy.cn,direct`。 + +## 常用 API + +### database/sql API + +* `sql.Open(DRIVER_NAME string, dataSourceName string) *DB` + + 该 API 用来打开 DB,返回一个类型为 \*DB 的对象。 + +:::info +该 API 成功创建的时候,并没有做权限等检查,只有在真正执行 Query 或者 Exec 的时候才能真正的去创建连接,并同时检查 user/password/host/port 是不是合法。 +::: + +* `func (db *DB) Exec(query string, args ...interface{}) (Result, error)` + + `sql.Open` 内置的方法,用来执行非查询相关 SQL。 + +* `func (db *DB) Query(query string, args ...interface{}) (*Rows, error)` + + `sql.Open` 内置的方法,用来执行查询语句。 + +## API 参考 + +全部 API 见 [driver-go 文档](https://pkg.go.dev/github.com/taosdata/driver-go/v3) diff --git a/docs/zh/10-programming/06-connector/04-rust.md b/docs/zh/10-programming/06-connector/04-rust.md new file mode 100644 index 0000000000000000000000000000000000000000..7694381c743c5a2660e2bfb3ceb5cf9936d5410d --- /dev/null +++ b/docs/zh/10-programming/06-connector/04-rust.md @@ -0,0 +1,226 @@ +--- +toc_max_heading_level: 4 +sidebar_position: 5 +sidebar_label: Rust +title: TDengine Rust Connector +--- + +[![Crates.io](https://img.shields.io/crates/v/taos)](https://crates.io/crates/taos) ![Crates.io](https://img.shields.io/crates/d/taos) [![docs.rs](https://img.shields.io/docsrs/taos)](https://docs.rs/taos) + +`taos` 是 TDengine 的官方 Rust 语言连接器。Rust 开发人员可以通过它开发存取 TDengine 数据库的应用软件。 + +该 Rust 连接器的源码托管在 [GitHub](https://github.com/taosdata/taos-connector-rust)。 + +## 版本支持 + +请参考[版本支持列表](../#版本支持) + +Rust 连接器仍然在快速开发中,1.0 之前无法保证其向后兼容。建议使用 3.0 版本以上的 TDengine,以避免已知问题。 + +## 安装 + +### 安装前准备 + +安装 Rust 开发工具链 + +### 添加 taos 依赖 + +根据选择的连接方式,按照如下说明在 [Rust](https://rust-lang.org) 项目中添加 [taos][taos] 依赖: + +在 `Cargo.toml` 文件中添加 [taos][taos]: + +```toml +[dependencies] +# use default feature +taos = "*" +``` + +## 建立连接 + +[TaosBuilder] 通过 DSN 连接描述字符串创建一个连接构造器。DSN 有下面的格式组成`://[:port]?token=`。 + +```rust +let builder = TaosBuilder::from_dsn(DSN)?; +``` + +现在您可以使用该对象创建连接: + +```rust +let conn = builder.build()?; +``` + +连接对象可以创建多个: + +```rust +let conn1 = builder.build()?; +let conn2 = builder.build()?; +``` + +建立连接后,您可以进行相关数据库操作: + +```rust +async fn demo(taos: &Taos, db: &str) -> Result<(), Error> { + // prepare database + taos.exec_many([ + format!("DROP DATABASE IF EXISTS `{db}`"), + format!("CREATE DATABASE `{db}`"), + format!("USE `{db}`"), + ]) + .await?; + + let inserted = taos.exec_many([ + // create super table + "CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) \ + TAGS (`groupid` INT, `location` BINARY(24))", + // 
create child table + "CREATE TABLE `d0` USING `meters` TAGS(0, 'California.LosAngles')", + // insert into child table + "INSERT INTO `d0` values(now - 10s, 10, 116, 0.32)", + // insert with NULL values + "INSERT INTO `d0` values(now - 8s, NULL, NULL, NULL)", + // insert and automatically create table with tags if not exists + "INSERT INTO `d1` USING `meters` TAGS(1, 'California.SanFrancisco') values(now - 9s, 10.1, 119, 0.33)", + // insert many records in a single sql + "INSERT INTO `d1` values (now-8s, 10, 120, 0.33) (now - 6s, 10, 119, 0.34) (now - 4s, 11.2, 118, 0.322)", + ]).await?; + + assert_eq!(inserted, 6); + let mut result = taos.query("select * from `meters`").await?; + + for field in result.fields() { + println!("got field: {}", field.name()); + } + + let values = result. +} +``` + +## API 参考 + +### 连接池 + +在复杂应用中,建议启用连接池。[taos] 的连接池使用 [r2d2] 实现。 + +如下,可以生成一个默认参数的连接池。 + +```rust +let pool = TaosBuilder::from_dsn(dsn)?.pool()?; +``` + +同样可以使用连接池的构造器,对连接池参数进行设置: + +```rust +let dsn = "taos://localhost:6030"; + +let opts = PoolBuilder::new() + .max_size(5000) // max connections + .max_lifetime(Some(Duration::from_secs(60 * 60))) // lifetime of each connection + .min_idle(Some(1000)) // minimal idle connections + .connection_timeout(Duration::from_secs(2)); + +let pool = TaosBuilder::from_dsn(dsn)?.with_pool_builder(opts)?; +``` + +在应用代码中,使用 `pool.get()?` 来获取一个连接对象 [Taos]。 + +```rust +let taos = pool.get()?; +``` + +### 连接 + +[Taos][struct.Taos] 对象提供了多个数据库操作的 API: + +1. `exec`: 执行某个非查询类 SQL 语句,例如 `CREATE`,`ALTER`,`INSERT` 等。 + + ```rust + let affected_rows = taos.exec("INSERT INTO tb1 VALUES(now, NULL)").await?; + ``` + +2. `exec_many`: 同时(顺序)执行多个 SQL 语句。 + + ```rust + taos.exec_many([ + "CREATE DATABASE test", + "USE test", + "CREATE TABLE `tb1` (`ts` TIMESTAMP, `val` INT)", + ]).await?; + ``` + +3. `query`:执行查询语句,返回 [ResultSet] 对象。 + + ```rust + let mut q = taos.query("select * from log.logs").await?; + ``` + + [ResultSet] 对象存储了查询结果数据和返回的列的基本信息(列名,类型,长度): + + 列信息使用 [.fields()] 方法获取: + + ```rust + let cols = q.fields(); + for col in cols { + println!("name: {}, type: {:?} , bytes: {}", col.name(), col.ty(), col.bytes()); + } + ``` + + 逐行获取数据: + + ```rust + let mut rows = result.rows(); + let mut nrows = 0; + while let Some(row) = rows.try_next().await? { + for (col, (name, value)) in row.enumerate() { + println!( + "[{}] got value in col {} (named `{:>8}`): {}", + nrows, col, name, value + ); + } + nrows += 1; + } + ``` + + 或使用 [serde](https://serde.rs) 序列化框架。 + + ```rust + #[derive(Debug, Deserialize)] + struct Record { + // deserialize timestamp to chrono::DateTime + ts: DateTime, + // float to f32 + current: Option, + // int to i32 + voltage: Option, + phase: Option, + groupid: i32, + // binary/varchar to String + location: String, + } + + let records: Vec = taos + .query("select * from `meters`") + .await? 
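+        // deserialize() maps each row onto the Record struct defined above via serde,
+        // and try_collect() gathers the resulting stream into a Vec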
+ .deserialize() + .try_collect() + .await?; + ``` + +需要注意的是,需要使用 Rust 异步函数和异步运行时。 + +[Taos][struct.Taos] 提供部分 SQL 的 Rust 方法化以减少 `format!` 代码块的频率: + +- `.describe(table: &str)`: 执行 `DESCRIBE` 并返回一个 Rust 数据结构。 +- `.create_database(database: &str)`: 执行 `CREATE DATABASE` 语句。 +- `.use_database(database: &str)`: 执行 `USE` 语句。 + +除此之外,该结构也是 [参数绑定](#参数绑定接口) 和 [行协议接口](#行协议接口) 的入口,使用方法请参考具体的 API 说明。 + + +其他相关结构体 API 使用说明请移步 Rust 文档托管网页:。 + +[taos]: https://github.com/taosdata/rust-connector-taos +[r2d2]: https://crates.io/crates/r2d2 +[TaosBuilder]: https://docs.rs/taos/latest/taos/struct.TaosBuilder.html +[TaosCfg]: https://docs.rs/taos/latest/taos/struct.TaosCfg.html +[struct.Taos]: https://docs.rs/taos/latest/taos/struct.Taos.html +[Stmt]: https://docs.rs/taos/latest/taos/struct.Stmt.html diff --git a/docs/zh/10-programming/06-connector/05-node.md b/docs/zh/10-programming/06-connector/05-node.md new file mode 100644 index 0000000000000000000000000000000000000000..2e7fe0d18cfa99ea9237b4db44019ea69ea07949 --- /dev/null +++ b/docs/zh/10-programming/06-connector/05-node.md @@ -0,0 +1,75 @@ +--- +toc_max_heading_level: 4 +sidebar_label: Node.js +title: TDengine Node.js Connector +--- + +`@tdengine/rest` 是 TDengine 的官方 Node.js 语言连接器。 Node.js 开发人员可以通过它开发可以存取 TDengine 集群数据的应用软件。另外`@tdengine/rest` 是 **REST 连接器**,它通过 REST 接口连接 TDengine 的运行实例。 + +Node.js 连接器源码托管在 [GitHub](https://github.com/taosdata/taos-connector-node/tree/3.0)。 + +## 版本支持 + +请参考[版本支持列表](../#版本支持) + +## 安装步骤 + +### 安装前准备 + +### 使用 npm 安装 + +```bash +npm install @tdengine/rest +``` + +## 建立连接 + +```javascript +{{#include docs/examples/node/connect.js}} +``` + +## 更多示例程序 + +```javascript +{{#include docs/examples/node/reference_example.js:usage}} +``` + +## 常见问题 + +1. 使用 REST 连接需要启动 taosadapter。 + + ```bash + sudo systemctl start taosadapter + ``` + +2. Node.js 版本 + + 原生连接器 `@tdengine/client` 目前兼容的 Node.js 版本为:>=v10.20.0 <= v10.9.0 || >=v12.8.0 <= v12.9.1 + +3. 
"Unable to establish connection","Unable to resolve FQDN" + + 一般都是因为配置 FQDN 不正确。 可以参考[如何彻底搞懂 TDengine 的 FQDN](https://www.taosdata.com/blog/2021/07/29/2741.html) 。 + +## 重要更新记录 + +### 原生连接器 + +| package name | version | TDengine version | 说明 | +|------------------|---------|---------------------|------------------------------------------------------------------| +| @tdengine/client | 3.0.0 | 3.0.0 | 支持TDengine 3.0 且不与2.x 兼容。 | +| td2.0-connector | 2.0.12 | 2.4.x;2.5.x;2.6.x | 修复 cursor.close() 报错的 bug。 | +| td2.0-connector | 2.0.11 | 2.4.x;2.5.x;2.6.x | 支持绑定参数、json tag、schemaless 接口等功能。 | +| td2.0-connector | 2.0.10 | 2.4.x;2.5.x;2.6.x | 支持连接管理,普通查询、连续查询、获取系统信息、订阅功能等功能。 | +### REST 连接器 + +| package name | version | TDengine version | 说明 | +|----------------------|---------|---------------------|---------------------------------------------------------------------------| +| @tdengine/rest | 3.0.0 | 3.0.0 | 支持 TDegnine 3.0,且不与2.x 兼容。 | +| td2.0-rest-connector | 1.0.7 | 2.4.x;2.5.x;2.6.x | 移除默认端口 6041。 | +| td2.0-rest-connector | 1.0.6 | 2.4.x;2.5.x;2.6.x | 修复create,insert,update,alter 等SQL 执行返回的 affectRows 错误的bug。 | +| td2.0-rest-connector | 1.0.5 | 2.4.x;2.5.x;2.6.x | 支持云服务 cloud Token; | +| td2.0-rest-connector | 1.0.3 | 2.4.x;2.5.x;2.6.x | 支持连接管理、普通查询、获取系统信息、错误信息、连续查询等功能。 | + +## API 参考 + +[API 参考](https://docs.taosdata.com/api/td2.0-connector/) diff --git a/docs/zh/10-programming/06-connector/06-csharp.md b/docs/zh/10-programming/06-connector/06-csharp.md new file mode 100644 index 0000000000000000000000000000000000000000..36bf54ad82de9ba636cd2afd81b2aed631c030eb --- /dev/null +++ b/docs/zh/10-programming/06-connector/06-csharp.md @@ -0,0 +1,100 @@ +--- +toc_max_heading_level: 4 +sidebar_label: C# +title: C# Connector +--- + +`TDengine.Connector` 是 TDengine 提供的 C# 语言连接器。C# 开发人员可以通过它开发存取 TDengine 集群数据的 C# 应用软件。 + +本文介绍如何在 Linux 或 Windows 环境中安装 `TDengine.Connector`,并通过 `TDengine.Connector` 连接 TDengine 集群,进行数据写入、查询等基本操作。 + +`TDengine.Connector` 的源码托管在 [GitHub](https://github.com/taosdata/taos-connector-dotnet/tree/3.0)。 + +## 版本支持 + +请参考[版本支持列表](../#版本支持) + +## 安装步骤 + +### 安装前准备 + +* 安装 [.NET SDK](https://dotnet.microsoft.com/download) +* [Nuget 客户端](https://docs.microsoft.com/en-us/nuget/install-nuget-client-tools) (可选安装) +* 安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](../#安装客户端驱动) + +### 安装 TDengine.Connector + +### 通过 Nuget 增加`TDengine.Connector`包 + +```bash +dotnet add package TDengine.Connector +``` + +## 建立连接 + +``` XML +{{#include docs/examples/csharp/cloud-example/connect/connect.csproj}} +``` + +``` csharp +{{#include docs/examples/csharp/cloud-example/connect/Program.cs}} +``` + +## 使用示例 + +### 基本插入和查询 + +``` XML +{{#include docs/examples/csharp/cloud-example/usage/usage.csproj}} +``` + +```C# +{{#include docs/examples/csharp/cloud-example/usage/Program.cs}} +``` + +### STMT 插入 + +``` XML +{{#include docs/examples/csharp/cloud-example/stmt/stmt.csproj}} +``` + +```C# +{{#include docs/examples/csharp/cloud-example/stmt/Program.cs}} +``` + +## 重要更新记录 + +| TDengine.Connector | 说明 | +|--------------------|--------------------------------| +| 3.0.2 | 支持 .NET Framework 4.5 及以上,支持 .NET standard 2.0。Nuget Package 包含 WebSocket 动态库。 | +| 3.0.1 | 支持 WebSocket 和 Cloud,查询,插入,参数绑定。 | +| 3.0.0 | 支持 TDengine 3.0.0.0,不兼容 2.x。新增接口TDengine.Impl.GetData(),解析查询结果。 | +| 1.0.7 | 修复 TDengine.Query()内存泄露。 | +| 1.0.6 | 修复 schemaless 在 1.0.4 和 1.0.5 中失效 bug。 | +| 1.0.5 | 修复 Windows 同步查询中文报错 bug。 | +| 1.0.4 | 新增异步查询,订阅等功能。修复绑定参数 bug。 | +| 1.0.3 | 新增参数绑定、schemaless、 json tag等功能。 | +| 1.0.2 | 新增连接管理、同步查询、错误信息等功能。 | + 
+## 其他说明 + +### 第三方驱动 + +[`IoTSharp.Data.Taos`](https://github.com/IoTSharp/EntityFrameworkCore.Taos) 是一个 TDengine 的 ADO.NET 连接器,其中包含了用于EntityFrameworkCore 的提供程序 IoTSharp.EntityFrameworkCore.Taos 和健康检查组件 IoTSharp.HealthChecks.Taos ,支持 Linux,Windows 平台。该连接器由社区贡献者`麦壳饼@@maikebing` 提供,具体请参考: + +* 接口下载: +* 用法说明: + +## 常见问题 + +1. "Unable to establish connection","Unable to resolve FQDN" + + 一般是因为 FQDN 配置不正确。可以参考[如何彻底搞懂 TDengine 的 FQDN](https://www.taosdata.com/blog/2021/07/29/2741.html)解决。 + +2. Unhandled exception. System.DllNotFoundException: Unable to load DLL 'taos' or one of its dependencies: 找不到指定的模块。 + + 一般是因为程序没有找到依赖的客户端驱动。解决方法为:Windows 下可以将 `C:\TDengine\driver\taos.dll` 拷贝到 `C:\Windows\System32\ ` 目录下,Linux 下建立如下软链接 `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` 即可。 + +## API 参考 + +[API 参考](https://docs.taosdata.com/api/connector-csharp/html/860d2ac1-dd52-39c9-e460-0829c4e5a40b.htm) diff --git a/docs/zh/10-programming/06-connector/09-rest-api.md b/docs/zh/10-programming/06-connector/09-rest-api.md new file mode 100644 index 0000000000000000000000000000000000000000..97585d9374a37f849029120ed2b17df5552a4ce9 --- /dev/null +++ b/docs/zh/10-programming/06-connector/09-rest-api.md @@ -0,0 +1,306 @@ +--- +title: REST API +sidebar_label: REST API +description: 详细介绍 TDengine 提供的 RESTful API. +--- + +为支持各种不同类型平台的开发,TDengine 提供符合 RESTful 设计标准的 API,即 REST API。为最大程度降低学习成本,不同于其他数据库 REST API 的设计方法,TDengine 直接通过 HTTP POST 请求 BODY 中包含的 SQL 语句来操作数据库,仅需要一个 URL。 + +:::note +与原生连接器的一个区别是,RESTful 接口是无状态的,因此 `USE db_name` 指令没有效果,所有对表名、超级表名的引用都需要指定数据库名前缀。支持在 RESTful URL 中指定 db_name,这时如果 SQL 语句中没有指定数据库名前缀的话,会使用 URL 中指定的这个 db_name。 +::: + +## 安装 + +RESTful 接口不依赖于任何 TDengine 的库,因此客户端不需要安装任何 TDengine 的库,只要客户端的开发语言支持 HTTP 协议即可。TDengine 的 RESTful API 由 [taosAdapter](../../reference/taosadapter) 提供,在使用 RESTful API 之前需要确保 `taosAdapter` 正常运行。 + +## 验证 + +在已经安装 TDengine 服务器端的情况下,可以按照如下方式进行验证。 + +下面以 Ubuntu 环境中使用 `curl` 工具(请确认已经安装)来验证 RESTful 接口是否工作正常,验证前请确认 taosAdapter 服务已开启,在 Linux 系统上此服务默认由 systemd 管理,使用命令 `systemctl start taosadapter` 启动。 + +下面示例是列出所有的数据库都在 `TDengine Cloud URL` 主机上面。如果您正在访问 TDengine Cloud ,您必须使用云服务的令牌。 + +```bash +curl -L \ + -d "select name, ntables, status from information_schema.ins_databases;" \ + /rest/sql?token= +``` + +返回值结果如下表示验证通过: + +```json +{ + "code": 0, + "column_meta": [ + [ + "name", + "VARCHAR", + 64 + ], + [ + "ntables", + "BIGINT", + 8 + ], + [ + "status", + "VARCHAR", + 10 + ] + ], + "data": [ + [ + "information_schema", + 16, + "ready" + ], + [ + "performance_schema", + 9, + "ready" + ] + ], + "rows": 2 +} +``` + +## HTTP 请求格式 + +```text +https:///rest/sql/[db_name]?token=TDENGINE_CLOUD_TOKEN +``` + +参数说明: + +- TDENGINE_CLOUD_URL: TDengine Cloud 的地址。 +- db_name: 可选参数,指定本次所执行的 SQL 语句的默认数据库库名。 +- token: 用来访问 TDengine Cloud 。 + +例如:`https://gw.cloud.taosdata.com/rest/sql/test?token=xxxxxxxxx` 是指向地址为 `gw-aws.cloud.tdengine:80` 的 URL,并将默认使用的数据库库名设置为 `test`。 + +HTTP 请求的 BODY 里就是一个完整的 SQL 语句,SQL 语句中的数据表应提供数据库前缀,例如 db_name.tb_name。如果表名不带数据库前缀,又没有在 URL 中指定数据库名的话,系统会返回错误。因为 HTTP 模块只是一个简单的转发,没有当前 DB 的概念。 + +使用 `curl` 通过自定义身份认证方式来发起一个 HTTP Request,语法如下: + +```bash +curl -L -d "" /rest/sql/[db_name]?token=TDENGINE_CLOUD_TOKEN +``` + +## HTTP 返回格式 + +### HTTP 响应码 + +| **response code** | **说明** | +|-------------------|----------------| +| 200 | 正确返回和 C 接口错误返回 | +| 400 | 参数错误返回 | +| 401 | 鉴权失败 | +| 404 | 接口不存在 | +| 500 | 内部错误 | +| 503 | 系统资源不足 | + +### HTTP body 结构 + +#### 正确执行插入 + +样例: + +```json +{ + "code": 0, + "column_meta": [["affected_rows", "INT", 4]], + "data": 
[[0]], + "rows": 1 +} +``` + +说明: + +- code:(`int`)0 代表成功。 +- column_meta:(`[1][3]any`)只返回 `[["affected_rows", "INT", 4]]`。 +- rows:(`int`)只返回 `1`。 +- data:(`[][]any`)返回受影响行数。 + +#### 正确执行查询 + +样例: + +```json +{ + "code": 0, + "column_meta": [ + ["ts", "TIMESTAMP", 8], + ["count", "BIGINT", 8], + ["endpoint", "VARCHAR", 45], + ["status_code", "INT", 4], + ["client_ip", "VARCHAR", 40], + ["request_method", "VARCHAR", 15], + ["request_uri", "VARCHAR", 128] + ], + "data": [ + [ + "2022-06-29T05:50:55.401Z", + 2, + "LAPTOP-NNKFTLTG:6041", + 200, + "172.23.208.1", + "POST", + "/rest/sql" + ], + [ + "2022-06-29T05:52:16.603Z", + 1, + "LAPTOP-NNKFTLTG:6041", + 200, + "172.23.208.1", + "POST", + "/rest/sql" + ], + [ + "2022-06-29T06:28:14.118Z", + 1, + "LAPTOP-NNKFTLTG:6041", + 200, + "172.23.208.1", + "POST", + "/rest/sql" + ], + [ + "2022-06-29T05:52:16.603Z", + 2, + "LAPTOP-NNKFTLTG:6041", + 401, + "172.23.208.1", + "POST", + "/rest/sql" + ] + ], + "rows": 4 +} +``` + +说明: + +- code:(`int`)0 代表成功。 +- column_meta:(`[][3]any`) 列信息,每个列会用三个值来说明,分别为:列名(string)、列类型(string)、类型长度(int)。 +- rows:(`int`)数据返回行数。 +- data:(`[][]any`)具体数据内容(时间格式仅支持 RFC3339,结果集为 0 时区)。 + +列类型使用如下字符串: + +- "NULL" +- "BOOL" +- "TINYINT" +- "SMALLINT" +- "INT" +- "BIGINT" +- "FLOAT" +- "DOUBLE" +- "VARCHAR" +- "TIMESTAMP" +- "NCHAR" +- "TINYINT UNSIGNED" +- "SMALLINT UNSIGNED" +- "INT UNSIGNED" +- "BIGINT UNSIGNED" +- "JSON" + +#### 错误 + +样例: + +```json +{ + "code": 9728, + "desc": "syntax error near \"1\"" +} +``` + +说明: + +- code:(`int`)错误码。 +- desc:(`string`)错误描述。 + +## 使用示例 + +- 在 demo 库里查询表 d1001 的所有记录: + + ```bash + export TDENGINE_CLOUD_URL=https://gw.cloud.taosdata.com + export TDENGINE_CLOUD_TOKEN= + curl -L -d "select * from demo.d1001" $TDENGINE_CLOUD_URL/rest/sql?token=$TDENGINE_CLOUD_TOKEN + ``` + + 返回值: + + ```json + { + "code": 0, + "column_meta": [ + [ + "ts", + "TIMESTAMP", + 8 + ], + [ + "current", + "FLOAT", + 4 + ], + [ + "voltage", + "INT", + 4 + ], + [ + "phase", + "FLOAT", + 4 + ] + ], + "data": [ + [ + "2022-07-30T06:44:40.32Z", + 10.3, + 219, + 0.31 + ], + [ + "2022-07-30T06:44:41.32Z", + 12.6, + 218, + 0.33 + ] + ], + "rows": 2 + } + ``` + +- 创建库 demo: + + ```bash + curl -L -d "create database demo" $TDENGINE_CLOUD_URL/rest/sql?token=$TDENGINE_CLOUD_TOKEN + ``` + + 返回值: + + ```json + { + "code": 0, + "column_meta": [ + [ + "affected_rows", + "INT", + 4 + ] + ], + "data": [ + [ + 0 + ] + ], + "rows": 1 + } + ``` diff --git a/docs/zh/10-programming/06-connector/_category_.yml b/docs/zh/10-programming/06-connector/_category_.yml new file mode 100644 index 0000000000000000000000000000000000000000..abd3f666f3b93697fde59931ffd7b10a0308b6b7 --- /dev/null +++ b/docs/zh/10-programming/06-connector/_category_.yml @@ -0,0 +1 @@ +label: "连接器" \ No newline at end of file diff --git a/docs/zh/10-programming/06-connector/index.md b/docs/zh/10-programming/06-connector/index.md new file mode 100644 index 0000000000000000000000000000000000000000..5a69bbe93559714b537fd0be455f60d8a9a886b6 --- /dev/null +++ b/docs/zh/10-programming/06-connector/index.md @@ -0,0 +1,14 @@ +--- +sidebar_label: 连接器引用 +title: 连接器引用 +description: 详细介绍各种语言的连接器及 REST API +--- + +TDengine 提供了丰富的应用程序开发接口,为了便于用户快速开发自己的应用,TDengine 支持了多种编程语言的连接器,其中官方连接器包括支持 Python、Java、Go、Rust、Node.js和 C# 的连接器,以及 REST 接口。 + +```mdx-code-block +import DocCardList from '@theme/DocCardList'; +import {useCurrentSidebarCategory} from '@docusaurus/theme-common'; + + +``` \ No newline at end of file diff --git a/docs/zh/10-programming/_category_.yml 
b/docs/zh/10-programming/_category_.yml new file mode 100644 index 0000000000000000000000000000000000000000..509a9405c42939a4819b87669a4c5b244bd29a8b --- /dev/null +++ b/docs/zh/10-programming/_category_.yml @@ -0,0 +1 @@ +label: 开发指南 \ No newline at end of file diff --git a/docs/zh/10-programming/create-database.png b/docs/zh/10-programming/create-database.png new file mode 100644 index 0000000000000000000000000000000000000000..9fad193a686991d4362e0ae6570a87750f546d1d Binary files /dev/null and b/docs/zh/10-programming/create-database.png differ diff --git a/docs/zh/10-programming/index.md b/docs/zh/10-programming/index.md new file mode 100644 index 0000000000000000000000000000000000000000..28618916374474139336f37e6028cef3d3b170ca --- /dev/null +++ b/docs/zh/10-programming/index.md @@ -0,0 +1,27 @@ +--- +title: 开发指南 +sidebar_label: 开发指南 +description: 让开发者能够快速上手的指南 +--- + +开发一个应用,如果你准备采用 TDengine 作为时序数据处理的工具,那么有如下几个事情要做: + +1. 确定应用到 TDengine 的连接方式。无论你使用何种编程语言,你总是可以使用 REST 接口, 但也可以使用每种编程语言独有的连接器进行方便的连接。 +2. 根据自己的应用场景,确定数据模型。根据数据特征,决定建立一个还是多个库;分清静态标签、采集量,建立正确的超级表,建立子表。 +3. 决定插入数据的方式。TDengine 支持使用标准的 SQL 写入,但同时也支持 Schemaless 模式写入,这样不用手工建表,可以将数据直接写入。 +4. 根据业务要求,看需要撰写哪些 SQL 查询语句。 +5. 如果你要基于时序数据做轻量级的实时统计分析,包括各种监测看板,那么建议你采用 TDengine 3.0 的流式计算功能,而不用额外部署 Spark, Flink 等复杂的流式计算系统。 +6. 如果你的应用有模块需要消费插入的数据,希望有新的数据插入时,就能获取通知,那么建议你采用 TDengine 提供的数据订阅功能,而无需专门部署 Kafka 或其他消息队列软件。 +7. 在很多场景下(如车辆管理),应用需要获取每个数据采集点的最新状态,那么建议你采用 TDengine 的 Cache 功能,而不用单独部署 Redis 等缓存软件。 +8. 如果你发现 TDengine 的函数无法满足你的要求,那么你可以使用用户自定义函数(UDF)来解决问题。 + +本部分内容就是按照上述顺序组织的。为便于理解,TDengine 为每个功能和每个支持的编程语言都提供了示例代码。如果你希望深入了解 SQL 的使用,需要查看[SQL 手册](../taos-sql/)。如果想更深入地了解各连接器的使用,请阅读[连接器参考指南](./connector/)。更多的方式往 TDengine 里面写入数据,请参考[Data In](../data-in),另外更多的方式从 TDengine 读取数据,请参考[Data Out](../data-out)。 + +如果在开发过程中遇到任何问题,请点击每个页面下方的["反馈问题"](https://github.com/taosdata/TDengine/issues/new/choose), 在 GitHub 上直接递交 Issue。 + +```mdx-code-block +import DocCardList from '@theme/DocCardList'; +import {useCurrentSidebarCategory} from '@docusaurus/theme-common'; + + +``` diff --git a/docs/zh/11-visual/01-grafana.md b/docs/zh/11-visual/01-grafana.md new file mode 100644 index 0000000000000000000000000000000000000000..fd3246a8db783241f72c13316168343b0ba59fd5 --- /dev/null +++ b/docs/zh/11-visual/01-grafana.md @@ -0,0 +1,54 @@ +--- +sidebar_label: Grafana +title: TDengine Cloud 与 Grafana 集成 +--- + +TDengine 能够与开源数据可视化系统 [Grafana](https://www.grafana.com/) 快速集成搭建数据监测报警系统,整个过程无需任何代码开发,TDengine 中数据表的内容可以在仪表盘(DashBoard)上进行可视化展现。关于 TDengine 插件的使用您可以在[GitHub](https://github.com/taosdata/grafanaplugin/blob/master/README.md)中了解更多。 + +## 安装 Grafana + +目前 TDengine 支持 Grafana 7.5 以上的版本。用户可以根据当前的操作系统,到 Grafana 官网下载安装包,并执行安装。下载地址如下:。 + +## 安装 TDengine 插件 + +### 通过图形化界面安装 + +TDengine 数据源插件已经在发布成 Grafana 官方插件,您可以通过 Grafana 配置 GUI 来安装它。在任何已经安装 Grafana 的平台上面,您可以打开链接 `http://localhost:3000`,然后点击左边的插件菜单。 + +![点击插件菜单](./grafana/click-plugin-menu-from-config.webp) + +输入关键字 `TDengine` 来搜索: + +![在 Grafana 插件里面搜索 TDengine](./grafana/search-tdengine-from-config.webp) + + +### 使用安装脚本 + +请复制下面的脚本命令来为数据源安装设置 `TDENGINE_CLOUD_URL` 和 `TDENGINE_CLOUD_TOKEN` 的环境变量: + +```bash +export TDENGINE_CLOUD_TOKEN="" +export TDENGINE_CLOUD_URL="" +``` + +从 Linux 终端运行下面的脚本来安装 TDengine 数据源插件。 + +```bash +bash -c "$(curl -fsSL https://raw.githubusercontent.com/taosdata/grafanaplugin/master/install.sh)" +``` + +安装结束以后,请重启 grafana-server。 + +```bash +sudo systemctl restart grafana-server.service +``` + +## 验证插件 + +用户可以通过 URL `http://localhost:3000`直接登录 Grafana 服务器(初始的用户名和密码是:admin/admin)。在左边点击`Configuration -> 
Data Sources`。然后点击 `Test` 按钮来验证 TDengine 数据源是否工作。如果测试通过,您应该可以看到成功消息。
+
+![验证 TDengine 数据源](./grafana/verifying-tdengine-datasource.webp)
+
+## 使用 Grafana
+
+您可以创建新的仪表盘,或者导入已有的仪表盘来展示 TDengine 里面的数据。更多细节请参考[文档](https://docs.tdengine.com/third-party/grafana#create-dashboard)。
diff --git a/docs/zh/11-visual/02-gds.md b/docs/zh/11-visual/02-gds.md
new file mode 100644
index 0000000000000000000000000000000000000000..c99fcf90b16652868483e31025559787ad5d239b
--- /dev/null
+++ b/docs/zh/11-visual/02-gds.md
@@ -0,0 +1,71 @@
+---
+sidebar_label: Google Data Studio
+title: 使用 Google Data Studio
+description: 使用 Google Data Studio 存取 TDengine 数据的详细指南
+---
+
+使用[合作伙伴的连接器](https://datastudio.google.com/data?search=TDengine),Google Data Studio 可以快速访问 TDengine,并通过基于网页的报表功能快速创建交互式的报表和仪表盘,整个过程无需编写任何代码。您可以把报表和仪表盘分享给不同的个人、团队乃至全世界,可以与其他人实时协作,还可以在任何网页中嵌入您的报表。
+
+关于 Data Studio 与 TDengine 集成的更多内容,可以参考 [GitHub](https://github.com/taosdata/gds-connector/blob/master/README.md)。
+
+## 选择数据源
+
+目前的[连接器](https://datastudio.google.com/data?search=TDengine)支持两种不同的数据源:TDengine Server 和 TDengine Cloud。首先选择“TDengine Cloud”类型,然后点击“下一步”。
+
+![Data Studio 数据源选择器](./gds/gds_data_source.webp)
+
+## 连接器配置
+
+### 必填配置
+
+#### URL
+
+要获取实际的 URL,请登录 [TDengine Cloud](https://cloud.taosdata.com) 后点击左边的“可视化”菜单,然后选择“Google Data Studio”。
+
+#### TDengine Cloud 令牌
+
+要获取实际的令牌,请登录 [TDengine Cloud](https://cloud.taosdata.com) 后点击左边的“可视化”菜单,然后选择“Google Data Studio”。
+
+#### 数据库
+
+数据库的名称,该数据库包含您想查询数据和创建报表的表,可以是一般表、超级表或者子表。
+
+#### 表
+
+您希望查询数据和制作报表的表的名称。
+
+**注意**:可以获取的最大记录行数是 1000000。
+
+### 可选配置
+
+#### 查询开始时间和结束时间
+
+页面上还可以为连接器配置两个时间输入框,这两个时间过滤条件用来在数据量较大时缩小查询范围。时间输入框的格式是“YYYY-MM-DD HH:MM:SS”。
+比如:
+
+```text
+2022-05-12 18:24:15
+```
+
+查询结果的开始时间戳由 `start date` 定义。加上这个条件,您不会获取到 `start date` 时间戳之前的数据。
+
+`end time` 输入框表示查询结束的时间戳,结束时间戳之后的数据同样不会被获取。这些条件是利用 SQL 的 WHERE 子句来实现的。比如:
+
+```sql
+-- select * from table_name where ts >= start_date and ts <= end_date
+select * from test.demo where ts >= '2022-05-10 18:24:15' and ts <= '2022-05-12 18:24:15'
+```
+
+事实上,您可以通过这些过滤器来加快报表加载数据的速度。
+![TDengine Cloud 配置页面](./gds/gds_cloud_login.webp)
+
+配置完成以后,点击“CONNECT”按钮,即可连接到 TDengine Cloud 上指定的数据库和表。
+
+## 创建报表和仪表盘
+
+通过交互式仪表盘和精美的报表释放您的 TDengine 数据的价值,更多详情请参考[文档](https://docs.tdengine.com/third-party/google-data-studio/)。
diff --git a/docs/zh/11-visual/gds/gds_cloud_login.webp b/docs/zh/11-visual/gds/gds_cloud_login.webp
new file mode 100644
index 0000000000000000000000000000000000000000..17695f2235795823e4c81f89dd2357d499f95e67
Binary files /dev/null and b/docs/zh/11-visual/gds/gds_cloud_login.webp differ
diff --git a/docs/zh/11-visual/gds/gds_data_source.webp b/docs/zh/11-visual/gds/gds_data_source.webp
new file mode 100644
index 0000000000000000000000000000000000000000..a1c7ddba2af5b11864add30b377062e28fabca91
Binary files /dev/null and b/docs/zh/11-visual/gds/gds_data_source.webp differ
diff --git a/docs/zh/11-visual/grafana/add_datasource1.webp b/docs/zh/11-visual/grafana/add_datasource1.webp
new file mode 100644
index 0000000000000000000000000000000000000000..211edc4457abd0db6b0ef64636d61d65b5f43db6
Binary files /dev/null and b/docs/zh/11-visual/grafana/add_datasource1.webp differ
diff --git a/docs/zh/11-visual/grafana/add_datasource2.webp b/docs/zh/11-visual/grafana/add_datasource2.webp
new file mode 100644
index 0000000000000000000000000000000000000000..8ab547231fee4d3b0874fcfe08c0ce152b0c53a1
Binary files /dev/null and b/docs/zh/11-visual/grafana/add_datasource2.webp differ
diff --git a/docs/zh/11-visual/grafana/add_datasource3.webp b/docs/zh/11-visual/grafana/add_datasource3.webp
new file mode 100644
index 0000000000000000000000000000000000000000..5ba81814c82e0ede6c453ebe9c98a34fde15a683
Binary files /dev/null and b/docs/zh/11-visual/grafana/add_datasource3.webp differ
diff --git a/docs/zh/11-visual/grafana/add_datasource4.webp b/docs/zh/11-visual/grafana/add_datasource4.webp
new file mode 100644
index 0000000000000000000000000000000000000000..cd82156acbf3c1a37f1a30de5a0578f9ef79617f
Binary files /dev/null and b/docs/zh/11-visual/grafana/add_datasource4.webp differ
diff --git a/docs/zh/11-visual/grafana/click-plugin-menu-from-config.webp b/docs/zh/11-visual/grafana/click-plugin-menu-from-config.webp
new file mode 100644
index 0000000000000000000000000000000000000000..a27c1ea1b7c9497e8caf6b5863942d79b23f2e61
Binary files /dev/null and b/docs/zh/11-visual/grafana/click-plugin-menu-from-config.webp differ
diff --git a/docs/zh/11-visual/grafana/create_dashboard1.webp b/docs/zh/11-visual/grafana/create_dashboard1.webp
new file mode 100644
index 0000000000000000000000000000000000000000..55eb388833e4df2a46f4d1cf6d346aa11429385d
Binary files /dev/null and b/docs/zh/11-visual/grafana/create_dashboard1.webp differ
diff --git a/docs/zh/11-visual/grafana/create_dashboard2.webp b/docs/zh/11-visual/grafana/create_dashboard2.webp
new file mode 100644
index 0000000000000000000000000000000000000000..bb40e407187718c52e9f617d8ebd3d25fd14b56b
Binary files /dev/null and b/docs/zh/11-visual/grafana/create_dashboard2.webp differ
diff --git a/docs/zh/11-visual/grafana/search-tdengine-from-config.webp b/docs/zh/11-visual/grafana/search-tdengine-from-config.webp
new file mode 100644
index 0000000000000000000000000000000000000000..ddc70cc15aa4b3d4d4a1ec612fda4e0c52559a98
Binary files /dev/null and b/docs/zh/11-visual/grafana/search-tdengine-from-config.webp differ
diff --git a/docs/zh/11-visual/grafana/verifying-tdengine-datasource.webp b/docs/zh/11-visual/grafana/verifying-tdengine-datasource.webp
new file mode 100644
index 0000000000000000000000000000000000000000..361203540a42375b1575c9a92eb3c39d21997311
Binary files /dev/null and b/docs/zh/11-visual/grafana/verifying-tdengine-datasource.webp differ
diff --git a/docs/zh/11-visual/index.md b/docs/zh/11-visual/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..b5d89dcbb2e4af636d4100d1bc989935561eeeaa
--- /dev/null
+++ b/docs/zh/11-visual/index.md
@@ -0,0 +1,7 @@
+---
+sidebar_label: 可视化
+title: 可视化
+description: TDengine 的可视化。
+---
+
+本章节主要介绍一些查看 TDengine 数据的可视化方式,比如运行状态和存储的数据情况。更多的可视化方式即将推出。
diff --git a/docs/zh/12-data-subscription/example/share-topic-example.webp b/docs/zh/12-data-subscription/example/share-topic-example.webp
new file mode 100644
index 0000000000000000000000000000000000000000..47556e6aed5190281f5c8ff0620ea63d01b12848
Binary files /dev/null and b/docs/zh/12-data-subscription/example/share-topic-example.webp differ
diff --git a/docs/zh/12-data-subscription/index.md b/docs/zh/12-data-subscription/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..4bc2466bd6ac6d2f4a2b3c13a614a30fb0c82e29
--- /dev/null
+++ b/docs/zh/12-data-subscription/index.md
@@ -0,0 +1,492 @@
+---
+sidebar_label: 数据订阅
+title: 数据订阅
+description: 通过主题来完成数据订阅并分享给 TDengine Cloud 的其他用户。
+---
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
+
+TDengine 提供类似于消息队列产品的数据订阅和消费接口。这些接口使应用程序更容易获取实时写入 TDengine 的数据,并且按照事件发生的顺序处理数据。这简化了您的时序数据处理系统,也降低了您的成本,无需部署像 Kafka 这样的消息队列产品。
+
+您可以像在 Kafka 中使用主题一样来使用 TDengine 
的主题是基于现有的超级表、表或子表的查询条件来定义的,也就是一个简单的 SELECT 语句。您可以使用 SQL 语言通过标签、表名、列或表达式来过滤数据,然后对数据执行标量函数或用户自定义的函数,暂不支持聚合函数。这些特性使 TDengine 数据订阅比类似产品有更多的灵活性,并且数据的颗粒度可以由应用程序自己按需控制,而过滤和预处理则由 TDengine 而不是应用程序处理。这种实现方式大大减少了传输的数据量和应用程序的复杂性。 + +通过订阅一个主题,消费者可以实时获得该主题的最新数据。并且多个消费者可以组成一个消费者组,共同消费这些数据。消费者组通过多线程、分布式数据消费能够实现更快的速度。请注意,订阅同一主题的不同组的消费者不会一起消费消息。一个消费者可以订阅多个主题。如果超级表中的数据被分散在多个虚拟节点中,消费者组可以比单个消费者更有效地消费它。TDengine 还包括一个确认机制,在机器可能崩溃或重启的复杂环境中确保至少一次完成消费。 + +该主题介绍了如何通过 TDengine Cloud 的访问控制管理和每个支持的连接器的订阅接口来共享 TDengine 实例的数据。数据所有者只需要通过创建主题,将想分享数据的用户或用户组添加到主题的订阅者。主题的订阅者就可以从 TDengine 获得关于如何以数据订阅方式访问共享数据的细节。在本文件中,我们将简要地解释数据共享的这些主要步骤。 + +## 创建主题 + +您可以在 TDengine Cloud 的数据订阅菜单中创建主题。在创建主题的对话框中,您可以选择向导或者 SQL 方式来创建主题。在向导方式下,您需要输入主题名称并选择当前 TDengine 实例的数据库。然后选择超级表或者指定超级表或子表的子查询。您也可以添加字段选择或为每个字段添加结果函数和过滤条件。下面,您可以了解如何通过向导方式在创建三种主题。 + +### 数据库 + +**添加新主题**对话框中的默认选择是**数据库**类型。在选择**数据库**后,您可以点击**确认**按钮,为数据库创建一个主题。 +![创建一个新主题](./topic/add-topic-db.webp) + +### 超级表 + +In the opened Add New Topic dialog, you can click STable type and select a specified super table from the selections. Then click Confirm button to create a topic to a super table. +在打开的**添加新主题**对话框中,您可以点击**超级表**类型并从选择中选择一个指定的超级表。然后点击**确认**按钮,为超级表创建一个主题。 +![在一个超级表上面创建主题](./topic/add-topic-stable.webp) + +### 子查询 + +在打开的**添加新主题**对话框中,您可以点击**子查询**类型来显示所有的子查询表项目。第一个项目是表类型,默认选择是**超级表**。在您选择或输入一个超级表的名称后,下面将显示超级表的所有字段。您可以为子查询勾选或取消勾选每个字段,也可以为每个字段设置结果函数或过滤条件。如果您想预览最终生产的 SQL 语句,请点击**SQL 预览**按钮,打开一个 SQL 对话框来预览。 +![在超级表子查询上创建主题](./topic/add-topic-sub-stable.webp) + +您可以选择另一个表表,然后从选择框中选择一个表,或者输入一个存在的表名,就可以获得所选表的所有字段。您可以为子查询勾选或取消勾选每个字段,也可以为每个字段设置结果函数或过滤条件。如果您想预览最终生产的 SQL 语句,请点击**SQL 预览**按钮,打开一个 SQL 对话框来预览。 +![Create a new topic with subquery for table](./topic/add-topic-sub-table.webp) + +## 共享主题 + +在主题页面中,您可以点击共享主题的操作图标,进入共享主题页面。您也可以直接点击**共享主题**选项卡来切换到正确的位置。在**共享主题**选项卡中的**可消费该主题的用户**页面, 您可以获得消费该主题的一个分享。 + +### 可消费该主题的用户 + +在**共享主题**页面的默认标签**可消费该主题的用户**中,您可以点击**添加可消费该主题的用户**按钮来添加更多在当前组织中活跃的用户。在打开的**添加新用户**对话框中,您可以选择您想分享主题的新用户,并设置分享给这些用户的过期时间。 +![共享主题](./share/share-topic-users.webp) + +![添加可消费该主题的用户](./share/share-topic-adduser.webp) + +### 可消费该主题的用户组 + +您可以点击**可消费该主题的用户组**标签,切换到共享主题的用户组页面。然后您可以点击**添加可消费该主题的用户组**按钮,添加更多在当前组织中活跃的用户组。在打开的**添加新用户组**对话框中,您可以选择您想共享主题的新用户组,并设置共享给这些用户组的过期时间。 +![可消费该主题的用户组](./share/share-topic-usergroup.webp) + +![添加可消费该主题的用户组](./share/share-topic-addusergroup.webp) + +## Consume Shared Topic + +当共享用户进入数据订阅的主题页面时,他可以获得创建者与他共享的所有主题。用户可以点击每个主题的**示例代码**图标,进入**示例代码**页面的操作区域。然后,他可以按照示例代码的步骤,从 TDengine 实例消费共享主题。 +![共享主题示例](./example/share-topic-example.webp) + +### 数据模型和应用接口 + +下面会介绍多种语言的相关数据模型和应用接口 + + + + +```go +func NewConsumer(conf *Config) (*Consumer, error) + +func (c *Consumer) Close() error + +func (c *Consumer) Commit(ctx context.Context, message unsafe.Pointer) error + +func (c *Consumer) FreeMessage(message unsafe.Pointer) + +func (c *Consumer) Poll(timeout time.Duration) (*Result, error) + +func (c *Consumer) Subscribe(topics []string) error + +func (c *Consumer) Unsubscribe() error +``` + + + + + +```rust +impl TBuilder for TmqBuilder + fn from_dsn(dsn: D) -> Result + fn build(&self) -> Result + +impl AsAsyncConsumer for Consumer + async fn subscribe, I: IntoIterator + Send>( + &mut self, + topics: I, + ) -> Result<(), Self::Error>; + fn stream( + &self, + ) -> Pin< + Box< + dyn '_ + + Send + + futures::Stream< + Item = Result<(Self::Offset, MessageSet), Self::Error>, + >, + >, + >; + async fn commit(&self, offset: Self::Offset) -> Result<(), Self::Error>; + + async fn unsubscribe(self); +``` + +了解更多详情,请点击[Crate taos](https://docs.rs/taos)。 + + + + 
+```python +class TaosConsumer(): + def __init__(self, *topics, **configs) + + def __iter__(self) + + def __next__(self) + + def sync_next(self) + + def subscription(self) + + def unsubscribe(self) + + def close(self) + + def __del__(self) +``` + + + + +### 配置 TDengine DSN + +您必须先为 GO 语言和 RUST 语言设置下面的内容 + + + +```shell +export TDENGINE_CLOUD_TMQ="" +``` + + + + +```shell +set TDENGINE_CLOUD_TMQ= +``` + + + + +```powershell +$env:TDENGINE_CLOUD_TMQ='' +``` + + + + +:::note +请使用实际值替换,格式类似于`wss://)/rest/tmq?token=`。获取`TDENGINE_CLOUD_TMQ`的实际值,请登录[TDengine Cloud](https://cloud.taosdata.com),然后在左边菜单点击**数据订阅**,然后再想消费的主题旁边的**示例代码**操作图标进入该主题的**示例代码**部分。 +::: + +特别对于 Python 语言,您需要设置下面的环境变量: + + + +```shell +export TDENGINE_CLOUD_ENDPOINT="" +export TDENGINE_CLOUD_TOKEN="" +``` + + + + +```shell +set TDENGINE_CLOUD_ENDPOINT= +set TDENGINE_CLOUD_TOKEN= +``` + + + + +```powershell +$env:TDENGINE_CLOUD_ENDPOINT='' +$env:TDENGINE_CLOUD_TOKEN='' +``` + + + + +:::note +请使用实际值替换。获取这些实际值,请登录[TDengine Cloud](https://cloud.taosdata.com),然后在左边菜单点击**数据订阅**,然后再想消费的主题旁边的**示例代码**操作图标进入该主题的**示例代码**部分。 +::: + +### 从实例创建消费者 + + +消费者需要通过一系列配置选项创建,基础配置项如下表所示: + +| 参数名称 | 类型 | 参数说明 | 备注 | +| :----------------------------: | :-----: | -------------------------------------------------------- | ------------------------------------------- | +| `td.connect.ip` | string | 用于创建连接,同 `taos_connect` | | +| `td.connect.user` | string | 用于创建连接,同 `taos_connect` | | +| `td.connect.pass` | string | 用于创建连接,同 `taos_connect` | | +| `td.connect.port` | integer | 用于创建连接,同 `taos_connect` | | +| `group.id` | string | 消费组 ID,同一消费组共享消费进度 | **必填项**。最大长度:192。 | +| `client.id` | string | 客户端 ID | 最大长度:192。 | +| `auto.offset.reset` | enum | 消费组订阅的初始位置 | 可选:`earliest`(default), `latest`, `none` | +| `enable.auto.commit` | boolean | 是否启用消费位点自动提交 | 合法值:`true`, `false`。 | +| `auto.commit.interval.ms` | integer | 以毫秒为单位的消费记录自动提交消费位点时间间 | 默认 5000 m | +| `enable.heartbeat.background` | boolean | 启用后台心跳,启用后即使长时间不 poll 消息也不会造成离线 | 默认开启 | +| `experimental.snapshot.enable` | boolean | 是否允许从 TSDB 消费数据 | 实验功能,默认关闭 | +| `msg.with.table.name` | boolean | 是否允许从消息中解析表名, 不适用于列订阅(列订阅时可将 tbname 作为列写入 subquery 语句) | | + +对于不同编程语言,其设置方式如下: + + + + + +```go +import ( + "github.com/taosdata/driver-go/v3/common" + tmqcommon "github.com/taosdata/driver-go/v3/common/tmq" + "github.com/taosdata/driver-go/v3/ws/tmq" +) +tmqStr := os.Getenv("TDENGINE_CLOUD_TMQ") +consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{ + "ws.url": tmqStr, + "ws.message.channelLen": uint(0), + "ws.message.timeout": common.DefaultMessageTimeout, + "ws.message.writeWait": common.DefaultWriteWait, + "group.id": "test_group", + "client.id": "test_consumer_ws", + "auto.offset.reset": "earliest", +}) +if err != nil { + panic(err) +} +``` + + + + + +```rust +let tmq_str = std::env::var("TDENGINE_CLOUD_TMQ")?; +let tmq_uri = format!( "{}&group.id=test_group_rs&client.id=test_consumer_ws", tmq_str); +println!("request tmq URI is {tmq_uri}\n"); +let tmq = TmqBuilder::from_dsn(tmq_uri,)?; +let mut consumer = tmq.build()?; +``` + + + + + +Python 程序可以配置的参数有: + +| 参数名称 | 类型 | 参数说明 | 备注 | +|:------:|:----:|:-------:|:---:| +| `td.connect.ip` | string | 用于创建连接|| +| `td.connect.user` | string | 用于创建连接|| +| `td.connect.pass` | string | 用于创建连接|| +| `td.connect.port` | string | 用于创建连接|| +| `group.id` | string | 消费组 ID,同一消费组共享消费进度 | **必填项**。最大长度:192 | +| `client.id` | string | 客户端 ID | 最大长度:192 | +| `msg.with.table.name` | string | 是否允许从消息中解析表名,不适用于列订阅 | 合法值:`true`, `false` | +| `enable.auto.commit` | string | 启用自动提交 
| 合法值:`true`, `false` | +| `auto.commit.interval.ms` | string | 以毫秒为单位的自动提交时间间隔 | 默认值:5000 ms | +| `auto.offset.reset` | string | 消费组订阅的初始位置 | 可选:`earliest`(default), `latest`, `none` | +| `experimental.snapshot.enable` | string | 是否允许从 TSDB 消费数据 | 合法值:`true`, `false` | +| `enable.heartbeat.background` | string | 启用后台心跳,启用后即使长时间不 poll 消息也不会造成离线 | 合法值:`true`, `false` | + +```python +endpoint = os.environ["TDENGINE_CLOUD_ENDPOINT"] +token = os.environ["TDENGINE_CLOUD_TOKEN"] + +conf = { + # auth options + "td.connect.websocket.scheme": "wss", + "td.connect.ip": endpoint, + "td.connect.token": token, + # consume options + "group.id": "test_group_py", + "client.id": "test_consumer_ws_py", +} +consumer = Consumer(conf) +``` + + + + +上述配置中包括 consumer group ID,如果多个消费者指定的 consumer group ID 一样,则自动形成一个消费者组织,共享消费进度。 + +### 订阅主题 + +一个消费者支持同时订阅多个主题。 + + + + + +```go +err = consumer.Subscribe("", nil) +if err != nil { + panic(err) +} +``` + + + + +```rust +consumer.subscribe([""]).await?; +``` + + + + + +```python +consumer.subscribe([""]) +``` + + + + +:::note +请使用实际值替换。获取这个实际值,请登录[TDengine Cloud](https://cloud.taosdata.com),然后在左边菜单点击**数据订阅**,然后复制您想消费的主题名称。 +::: + +## 消费消息 + +以下代码展示了不同语言下如何对主题的消息进行消费。 + + + + + +```go +for { + ev := consumer.Poll(0) + if ev != nil { + switch e := ev.(type) { + case *tmqcommon.DataMessage: + fmt.Printf("get message:%v\n", e.String()) + consumer.Commit() + case tmqcommon.Error: + fmt.Printf("%% Error: %v: %v\n", e.Code(), e) + return + default: + fmt.Printf("unexpected event:%v\n", e) + return + } + } +} +``` + + + + + +```rust +// consume loop +consumer + .stream() + .try_for_each_concurrent(10, |(offset, message)| async { + let topic = offset.topic(); + // the vgroup id, like partition id in kafka. + let vgroup_id = offset.vgroup_id(); + println!("* in vgroup id {vgroup_id} of topic {topic}\n"); + + if let Some(data) = message.into_data() { + while let Some(block) = data.fetch_raw_block().await? { + // A two-dimension matrix while each cell is a [taos::Value] object. + let values = block.to_values(); + // Number of rows. 
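+                    // (to_values() is row-major: values[row][col], which the two assertions below check)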
+ assert_eq!(values.len(), block.nrows()); + // Number of columns + assert_eq!(values[0].len(), block.ncols()); + println!("first row: {}", values[0].iter().join(", ")); + } + } + consumer.commit(offset).await?; + Ok(()) + }) + .await?; +``` + + + + +```python +while 1: + message = consumer.poll(timeout=1.0) + if message: + id = message.vgroup() + topic = message.topic() + database = message.database() + + for block in message: + nrows = block.nrows() + ncols = block.ncols() + for row in block: + print(row) + values = block.fetchall() + print(nrows, ncols) + else: + break +``` + + + + +## 结束消费 + +消费结束后,应当取消订阅。 + + + + + +```go +consumer.Close() +``` + + + + + +```rust +consumer.unsubscribe().await; +``` + + + + + +```py +# Unsubscribe +consumer.unsubscribe() +# Close consumer +consumer.close() +``` + + + + + +### 示例代码 + +在下面是完整的展示如何消费共享主题**test**的示例代码: + + + + + +```go +{{#include docs/examples/go/sub/cloud/main.go}} +``` + + + + + +```rust +{{#include docs/examples/rust/cloud-example/examples/sub.rs}} +``` + + + + + +```python +{{#include docs/examples/python/cloud/sub.py}} +``` + + + + diff --git a/docs/zh/12-data-subscription/share/share-topic-adduser.webp b/docs/zh/12-data-subscription/share/share-topic-adduser.webp new file mode 100644 index 0000000000000000000000000000000000000000..82b39292af2d8bcf189cf702436aa2b565ca130f Binary files /dev/null and b/docs/zh/12-data-subscription/share/share-topic-adduser.webp differ diff --git a/docs/zh/12-data-subscription/share/share-topic-addusergroup.webp b/docs/zh/12-data-subscription/share/share-topic-addusergroup.webp new file mode 100644 index 0000000000000000000000000000000000000000..93747d41792555ace27813d8c64256ce36353dde Binary files /dev/null and b/docs/zh/12-data-subscription/share/share-topic-addusergroup.webp differ diff --git a/docs/zh/12-data-subscription/share/share-topic-usergroup.webp b/docs/zh/12-data-subscription/share/share-topic-usergroup.webp new file mode 100644 index 0000000000000000000000000000000000000000..d9a0899e2f8461717d012a186a468b1807d111f1 Binary files /dev/null and b/docs/zh/12-data-subscription/share/share-topic-usergroup.webp differ diff --git a/docs/zh/12-data-subscription/share/share-topic-users.webp b/docs/zh/12-data-subscription/share/share-topic-users.webp new file mode 100644 index 0000000000000000000000000000000000000000..5d755aea30d1e34025351208c40cd67d132c34b2 Binary files /dev/null and b/docs/zh/12-data-subscription/share/share-topic-users.webp differ diff --git a/docs/zh/12-data-subscription/topic/add-topic-db.webp b/docs/zh/12-data-subscription/topic/add-topic-db.webp new file mode 100644 index 0000000000000000000000000000000000000000..1aee40bd35e42c946a3f9e659a1370f71f5b8c5e Binary files /dev/null and b/docs/zh/12-data-subscription/topic/add-topic-db.webp differ diff --git a/docs/zh/12-data-subscription/topic/add-topic-stable.webp b/docs/zh/12-data-subscription/topic/add-topic-stable.webp new file mode 100644 index 0000000000000000000000000000000000000000..1cc472439f4cddd68e7a1a3b97dab68216c3b9ec Binary files /dev/null and b/docs/zh/12-data-subscription/topic/add-topic-stable.webp differ diff --git a/docs/zh/12-data-subscription/topic/add-topic-sub-stable.webp b/docs/zh/12-data-subscription/topic/add-topic-sub-stable.webp new file mode 100644 index 0000000000000000000000000000000000000000..1d68c5fb8e972f9916119ed81df2e3b47871ef00 Binary files /dev/null and b/docs/zh/12-data-subscription/topic/add-topic-sub-stable.webp differ diff --git a/docs/zh/12-data-subscription/topic/add-topic-sub-table.webp 
b/docs/zh/12-data-subscription/topic/add-topic-sub-table.webp
new file mode 100644
index 0000000000000000000000000000000000000000..526bb86711b315c88a86b74511c38457f6efc314
Binary files /dev/null and b/docs/zh/12-data-subscription/topic/add-topic-sub-table.webp differ
diff --git a/docs/zh/12-stream.md b/docs/zh/12-stream.md
new file mode 100644
index 0000000000000000000000000000000000000000..7e3d723b16c09690054f81729227372546055910
--- /dev/null
+++ b/docs/zh/12-stream.md
@@ -0,0 +1,112 @@
+---
+sidebar_label: 流式计算
+description: "TDengine 流式计算将数据的写入、预处理、复杂分析、实时计算、报警触发等功能融为一体,是一个能够降低用户部署成本、存储成本和运维成本的计算引擎。"
+title: 流式计算
+---
+
+在时序数据的处理中,经常要对原始数据进行清洗、预处理,再使用时序数据库进行长期存储。在传统的时序数据解决方案中,常常需要部署 Kafka、Flink 等流处理系统,而流处理系统的复杂性带来了高昂的开发与运维成本。
+
+TDengine 3.0 的流式计算引擎提供了实时处理写入的数据流的能力,使用 SQL 定义实时流变换。当数据被写入流的源表后,数据会以定义的方式被自动处理,并根据定义的触发模式向目的表推送结果。它提供了替代复杂流处理系统的轻量级解决方案,并能够在高吞吐的数据写入的情况下,提供毫秒级的计算结果延迟。
+
+流式计算可以包含数据过滤、标量函数计算(含 UDF)以及窗口聚合(支持滑动窗口、会话窗口与状态窗口),可以以超级表、子表、普通表为源表,写入到目的超级表。在创建流时,目的超级表将被自动创建,随后新插入的数据会按流定义的方式处理并写入其中。通过 partition by 子句,可以按表名或标签划分 partition,不同的 partition 将写入到目的超级表的不同子表。
+
+TDengine 的流式计算能够支持分布在多个 vnode 中的超级表聚合;还能够处理乱序数据的写入:它提供了 watermark 机制以度量容忍数据乱序的程度,并提供了 ignore expired 配置项以决定乱序数据的处理策略——丢弃或者重新计算。
+
+详见 [流式计算](../../taos-sql/stream)
+
+## 流式计算的创建
+
+```sql
+CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name AS subquery
+stream_options: {
+ TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time]
+ WATERMARK time
+}
+```
+
+详细的语法规则参考 [流式计算](../../taos-sql/stream)
+
+## 示例一
+
+企业电表的数据往往多达数百亿、上千亿条,要对这些分散、凌乱的数据进行清洗或转换通常需要很长时间,很难兼顾高效性和实时性。在以下例子中,通过流计算将电压大于 220V 的电表数据清洗掉,然后以 5 秒为窗口整合并计算出每个窗口中电流的最大值,最后将结果输出到指定的数据表中。
+
+### 为原始数据创建 DB
+
+在 TDengine Cloud 的数据浏览器里面创建数据库 `power`,然后执行下面的 SQL 创建一张超级表,并基于它创建四张子表:
+
+```sql
+CREATE STABLE power.meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);
+
+CREATE TABLE power.d101 USING power.meters TAGS ("California.SanFrancisco", 2);
+CREATE TABLE power.d102 USING power.meters TAGS ("California.SanFrancisco", 3);
+CREATE TABLE power.d103 USING power.meters TAGS ("California.LosAngeles", 2);
+CREATE TABLE power.d104 USING power.meters TAGS ("California.LosAngeles", 3);
+```
+
+### 创建流
+
+```sql
+create stream current_stream into power.current_stream_output_stb as select _wstart as wstart, _wend as wend, max(current) as max_current from power.meters where voltage <= 220 interval (5s);
+```
+
+### 写入数据
+```sql
+insert into power.d101 values("2018-10-03 14:38:05.000", 10.30000, 219, 0.31000);
+insert into power.d101 values("2018-10-03 14:38:15.000", 12.60000, 218, 0.33000);
+insert into power.d101 values("2018-10-03 14:38:16.800", 12.30000, 221, 0.31000);
+insert into power.d102 values("2018-10-03 14:38:16.650", 10.30000, 218, 0.25000);
+insert into power.d103 values("2018-10-03 14:38:05.500", 11.80000, 221, 0.28000);
+insert into power.d103 values("2018-10-03 14:38:16.600", 13.40000, 223, 0.29000);
+insert into power.d104 values("2018-10-03 14:38:05.000", 10.80000, 223, 0.29000);
+insert into power.d104 values("2018-10-03 14:38:06.500", 11.50000, 221, 0.35000);
+```
+
+### 查询以观察结果
+
+```sql title="SQL"
+select wstart, wend, max_current from power.current_stream_output_stb;
+```
+
+```txt title="output"
+          wstart         |           wend          |     max_current      |
+===========================================================================
+ 2018-10-03 14:38:05.000 | 2018-10-03 14:38:10.000 |             10.30000 |
+ 2018-10-03 14:38:15.000 | 2018-10-03 14:38:20.000 |             12.60000 |
+Query OK, 2 rows in database (0.018762s)
+```
+
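+流被创建后会在后台持续运行,源表每有新数据写入就会增量计算并写入目的表。下面是一个示意性的管理片段(以本例创建的流名 `current_stream` 为例):可以用 `SHOW STREAMS` 确认流已创建,不再需要时用 `DROP STREAM` 删除流任务;删除流不会删除目的表中已写入的结果数据。
+
+```sql
+-- 查看当前实例中已创建的流
+SHOW STREAMS;
+-- 删除示例一创建的流(目的表 power.current_stream_output_stb 及其数据保留)
+DROP STREAM IF EXISTS current_stream;
+```
+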
+## 示例二
+
+依然以示例一中的数据为基础。我们已经采集到了每个智能电表的电流和电压数据,现在需要求出有功功率和无功功率,将地域和电表名以符号 "." 拼接,然后以电表名称分组,输出到新的数据表中。
+
+### 创建 DB 和原始数据表
+
+使用上一个示例的方式来创建数据库。
+
+### 创建流
+
+```sql
+create stream power_stream into power.power_stream_output_stb as select ts, concat_ws(".", location, tbname) as meter_location, current*voltage*cos(phase) as active_power, current*voltage*sin(phase) as reactive_power from power.meters partition by tbname;
+```
+
+### 写入数据
+
+参考示例一的方式来写入数据。
+
+### 查询以观察结果
+```sql title="SQL"
+select ts, meter_location, active_power, reactive_power from power.power_stream_output_stb;
+```
+```txt title="output"
+           ts            |        meter_location        |     active_power     |    reactive_power    |
+===================================================================================================================
+ 2018-10-03 14:38:05.000 | California.LosAngeles.d104   |       2307.834596289 |        688.687331847 |
+ 2018-10-03 14:38:06.500 | California.LosAngeles.d104   |       2387.415754896 |        871.474763418 |
+ 2018-10-03 14:38:05.500 | California.LosAngeles.d103   |       2506.240411679 |        720.680274962 |
+ 2018-10-03 14:38:16.600 | California.LosAngeles.d103   |       2863.424274422 |        854.482390839 |
+ 2018-10-03 14:38:05.000 | California.SanFrancisco.d101 |       2148.178871730 |        688.120784090 |
+ 2018-10-03 14:38:15.000 | California.SanFrancisco.d101 |       2598.589176205 |        890.081451418 |
+ 2018-10-03 14:38:16.800 | California.SanFrancisco.d101 |       2588.728381186 |        829.240910475 |
+ 2018-10-03 14:38:16.650 | California.SanFrancisco.d102 |       2175.595991997 |        555.520860397 |
+Query OK, 8 rows in database (0.014753s)
+```
diff --git a/docs/zh/13-replication/index.md b/docs/zh/13-replication/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..8bb951ca6ec4439584556bbe915717d0b6bc3671
--- /dev/null
+++ b/docs/zh/13-replication/index.md
@@ -0,0 +1,9 @@
+---
+sidebar_label: 数据复制
+title: 数据复制
+description: 在不同的 TDengine Cloud 实例之间进行数据复制
+---
+
+TDengine Cloud 提供对数据复制的全面支持。您可以将数据从 TDengine Cloud 复制到私有 TDengine 实例,从私有 TDengine 实例复制到 TDengine Cloud,或者从一个云平台复制到另一个云平台,无论这两个服务位于哪个云或区域。
+
+TDengine Cloud 也提供数据备份。
diff --git a/docs/zh/19-tools/01-cli.md b/docs/zh/19-tools/01-cli.md
new file mode 100644
index 0000000000000000000000000000000000000000..d024df14567148c1b69547decf8c7deb917ed800
--- /dev/null
+++ b/docs/zh/19-tools/01-cli.md
@@ -0,0 +1,126 @@
+---
+title: TDengine 命令行(CLI)
+sidebar_label: TDengine CLI
+description: TDengine CLI 的使用说明和技巧
+---
+
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+
+
+TDengine 命令行程序(以下简称 TDengine CLI)是用户操作 TDengine 实例并与之交互的最简洁、最常用的方式。
+
+## 安装
+
+要运行 TDengine CLI 访问 TDengine Cloud,请首先下载并安装最新的 [TDengine 客户端安装包](https://docs.taosdata.com/releases/tdengine/)。
+
+## 配置
+
+
+
+在您的 Linux 终端里面执行下面的命令设置 TDengine Cloud 的 DSN 为环境变量:
+
+```bash
+export TDENGINE_CLOUD_DSN=""
+```
+
+
+
+在您的 Windows CMD 里面执行下面的命令设置 TDengine Cloud 的 DSN 为环境变量:
+
+```cmd
+set TDENGINE_CLOUD_DSN=
+```
+
+或者在您的 Windows PowerShell 里面执行下面的命令设置 TDengine Cloud 的 DSN 为环境变量:
+
+```cmd
+$env:TDENGINE_CLOUD_DSN=''
+```
+
+
+
+在您的 Mac 里面执行下面的命令设置 TDengine Cloud 的 DSN 为环境变量:
+
+```bash
+export TDENGINE_CLOUD_DSN=""
+```
+
+
+
+
+:::note
+获取真实的 `DSN` 的值,请登录 [TDengine Cloud](https://cloud.taosdata.com) 后点击左边的“工具”菜单,然后选择“TDengine CLI”。
+:::
+
+
+## 建立连接
+
+
+
+如果您已经设置了环境变量,您只需要直接执行 `taos` 命令就可以访问 TDengine Cloud 实例。
+
+```
+taos
+```
+
+如果您没有设置 TDengine Cloud 实例的环境变量,或者您想访问其他 TDengine Cloud 实例,您可以使用下面的命令 `taos -E ` 来执行:
+
+```
+taos -E $TDENGINE_CLOUD_DSN
+```
+
+
+
+
+如果您已经设置了环境变量,您只需要直接执行 `taos` 命令就可以访问 TDengine Cloud 实例。
+
+```powershell
+taos.exe
+```
+
+如果您没有设置 TDengine Cloud 实例的环境变量,或者您想访问其他 TDengine Cloud 实例,您可以使用下面的命令 `taos -E ` 来执行:
+
+```powershell
+taos.exe -E $TDENGINE_CLOUD_DSN
+```
+
+
+
+
+如果您已经设置了环境变量,您只需要直接执行 `taos` 命令就可以访问 TDengine Cloud 实例。
+
+```bash
+taos
+```
+
+如果您没有设置 TDengine Cloud 实例的环境变量,或者您想访问其他 TDengine Cloud 实例,您可以使用下面的命令 `taos -E ` 来执行:
+
+```bash
+taos -E $TDENGINE_CLOUD_DSN
+```
+
+
+
+
+## 使用 TDengine CLI
+
+如果成功连接到 TDengine 服务,TDengine CLI 会显示欢迎消息和版本信息;如果连接失败,则会打印失败消息。TDengine CLI 打印的成功消息如下:
+
+```text
+Welcome to the TDengine shell from Linux, Client Version:3.0.0.0
+Copyright (c) 2022 by TAOS Data, Inc. All rights reserved.
+
+Successfully connect to cloud.tdengine.com:8085 in restful mode
+
+taos>
+```
+
+进入 TDengine CLI 以后,您就可以执行各种 SQL 命令来进行插入、查询或者管理操作。详情请参考[官方文档](https://docs.taosdata.com/reference/taos-shell#execute-sql-script-file)。
diff --git a/docs/zh/19-tools/03-taosbenchmark.md b/docs/zh/19-tools/03-taosbenchmark.md
new file mode 100644
index 0000000000000000000000000000000000000000..15fb3a611eb437fd4af6eae0984183396f5684ea
--- /dev/null
+++ b/docs/zh/19-tools/03-taosbenchmark.md
@@ -0,0 +1,398 @@
+---
+title: taosBenchmark
+sidebar_label: taosBenchmark
+toc_max_heading_level: 4
+description: 'taosBenchmark(曾用名 taosdemo)是一个用于测试 TDengine 产品性能的工具'
+---
+
+## 简介
+
+taosBenchmark(曾用名 taosdemo)是一个用于测试 TDengine 产品性能的工具。taosBenchmark 可以测试 TDengine 的插入、查询和订阅等功能的性能,它可以模拟由大量设备产生的大量数据,还可以灵活地控制数据库、超级表、标签列的数量和类型、数据列的数量和类型、子表的数量、每张子表的数据量、插入数据的时间间隔、taosBenchmark 的工作线程数量、是否以及如何插入乱序数据等。为了兼容过往用户的使用习惯,安装包提供了 taosdemo 作为 taosBenchmark 的软链接。
+
+:::note
+在使用 TDengine Cloud 的时候,请注意,没有被授权的用户无法通过包括 taosBenchmark 在内的任何工具创建数据库,只能通过 TDengine Cloud 的数据浏览器来创建。请忽略本文档中提到的任何创建数据库的内容,改为在 TDengine Cloud 里面手动创建数据库。
+:::
+
+## 安装
+
+taosBenchmark 有两种安装方式:
+
+- 安装 TDengine 官方安装包的同时会自动安装 taosBenchmark,详情请参考 [TDengine 安装](/operation/pkg-install)。
+
+- 单独编译 taos-tools 并安装,详情请参考 [taos-tools](https://github.com/taosdata/taos-tools) 仓库。
+
+## 运行
+
+### 配置和运行方式
+
+运行下面的命令来设置 TDengine Cloud 的 DSN 环境变量:
+
+```bash
+export TDENGINE_CLOUD_DSN=""
+```
+
+
+:::note
+获取真实的 `DSN` 的值,请登录 [TDengine Cloud](https://cloud.taosdata.com) 后点击左边的“工具”菜单,然后选择“taosBenchmark”。
+:::
+
+
+用户只能使用一个命令行参数 `-f ` 来指定配置文件。
+
+taosBenchmark 支持对 TDengine 做完备的性能测试,其所支持的 TDengine 功能分为三大类:写入、查询和订阅。这三种功能之间是互斥的,每次运行 taosBenchmark 只能选择其中之一。值得注意的是,所要测试的功能类型在使用命令行配置方式时是不可配置的,命令行配置方式只能测试写入性能。若要测试 TDengine 的查询和订阅性能,必须使用配置文件的方式,通过配置文件中的参数 `filetype` 指定所要测试的功能类型。
+
+**在运行 taosBenchmark 之前要确保 TDengine 集群已经在正确运行。**
+
+### 使用配置文件运行
+
+taosBenchmark 安装包中提供了配置文件的示例,位于 `/examples/taosbenchmark-json` 下。
+
+使用如下命令行即可运行 taosBenchmark 并通过配置文件控制其行为。
+
+```bash
+taosBenchmark -f 
+```
+
+**下面是几个配置文件的示例:**
+
+#### 插入场景 JSON 配置文件示例
+```json
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "connection_pool_size": 8,
+ "thread_count": 4,
+ "create_table_thread_count": 7,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 100,
+ "num_of_records_per_req": 100,
+ "prepared_rand": 10000,
+ "chinese": "no",
+ "databases": [
+ {
+ "dbinfo": {
+ "name": "test",
+ "drop": "no",
+ "replica": 1,
+ "precision": "ms",
+ "keep": 3650,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp": 2
+ },
+ 
"super_tables": [ + { + "name": "meters", + "child_table_exists": "no", + "childtable_count": 10000, + "childtable_prefix": "d", + "escape_character": "yes", + "auto_create_table": "no", + "batch_create_tbl_num": 5, + "data_source": "rand", + "insert_mode": "taosc", + "non_stop_mode": "no", + "line_protocol": "line", + "insert_rows": 10000, + "childtable_limit": 10, + "childtable_offset": 100, + "interlace_rows": 0, + "insert_interval": 0, + "partial_col_num": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "use_sample_ts": "no", + "tags_file": "", + "columns": [ + { + "type": "FLOAT", + "name": "current", + "count": 1, + "max": 12, + "min": 8 + }, + { "type": "INT", "name": "voltage", "max": 225, "min": 215 }, + { "type": "FLOAT", "name": "phase", "max": 1, "min": 0 } + ], + "tags": [ + { + "type": "TINYINT", + "name": "groupid", + "max": 10, + "min": 1 + }, + { + "name": "location", + "type": "BINARY", + "len": 16, + "values": ["San Francisco", "Los Angles", "San Diego", + "San Jose", "Palo Alto", "Campbell", "Mountain View", + "Sunnyvale", "Santa Clara", "Cupertino"] + } + ] + } + ] + } + ] +} + +``` + +#### 查询场景 JSON 配置文件示例 + +```json +{ + "filetype": "query", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "confirm_parameter_prompt": "no", + "databases": "test", + "query_times": 2, + "query_mode": "taosc", + "specified_table_query": { + "query_interval": 1, + "concurrent": 3, + "sqls": [ + { + "sql": "select last_row(*) from meters", + "result": "./query_res0.txt" + }, + { + "sql": "select count(*) from d0", + "result": "./query_res1.txt" + } + ] + }, + "super_table_query": { + "stblname": "meters", + "query_interval": 1, + "threads": 3, + "sqls": [ + { + "sql": "select last_row(ts) from xxxx", + "result": "./query_res2.txt" + } + ] + } +} + +``` + +## 配置文件参数详解 + +### 通用配置参数 + +本节所列参数适用于所有功能模式。 + +- **filetype** : 要测试的功能,可选值为 `insert`, `query` 和 `subscribe`。分别对应插入、查询和订阅功能。每个配置文件中只能指定其中之一。 +- **cfgdir** : TDengine 客户端配置文件所在的目录,默认路径是 /etc/taos 。 + +- **host** : 指定要连接的 TDengine 服务端的 FQDN,默认值为 localhost。 + +- **port** : 要连接的 TDengine 服务器的端口号,默认值为 6030。 + +- **user** : 用于连接 TDengine 服务端的用户名,默认为 root。 + +- **password** : 用于连接 TDengine 服务端的密码,默认值为 taosdata。 + +### 插入场景配置参数 + +插入场景下 `filetype` 必须设置为 `insert`,该参数及其它通用参数详见[通用配置参数](#通用配置参数) + +- ** keep_trying ** : 失败后进行重试的次数,默认不重试。需使用 v3.0.9 以上版本。 + +- ** trying_interval ** : 失败重试间隔时间,单位为毫秒,仅在 keep_trying 指定重试后有效。需使用 v3.0.9 以上版本。 + +#### 流式计算相关配置参数 + +创建流式计算的相关参数在 json 配置文件中的 `stream` 中配置,具体参数如下。 + +- **stream_name** : 流式计算的名称,必填项。 + +- **stream_stb** : 流式计算对应的超级表名称,必填项。 + +- **stream_sql** : 流式计算的sql语句,必填项。 + +- **trigger_mode** : 流式计算的触发模式,可选项。 + +- **watermark** : 流式计算的水印,可选项。 + +- **drop** : 是否创建流式计算,可选项为 "yes" 或者 "no", 为 "no" 时不创建。 + +#### 超级表相关配置参数 + +创建超级表时的相关参数在 json 配置文件中的 `super_tables` 中配置,具体参数如下。 + +- **name**: 超级表名,必须配置,没有默认值。 + +- **child_table_exists** : 子表是否已经存在,默认值为 "no",可选值为 "yes" 或 "no"。 + +- **child_table_count** : 子表的数量,默认值为 10。 + +- **child_table_prefix** : 子表名称的前缀,必选配置项,没有默认值。 + +- **escape_character** : 超级表和子表名称中是否包含转义字符,默认值为 "no",可选值为 "yes" 或 "no"。 + +- **auto_create_table** : 仅当 insert_mode 为 taosc, rest, stmt 并且 childtable_exists 为 "no" 时生效,该参数为 "yes" 表示 taosBenchmark 在插入数据时会自动创建不存在的表;为 "no" 则表示先提前建好所有表再进行插入。 + +- **batch_create_tbl_num** : 创建子表时每批次的建表数量,默认为 10。注:实际的批数不一定与该值相同,当执行的 SQL 语句大于支持的最大长度时,会自动截断再执行,继续创建。 + +- 
**data_source** : 数据的来源,默认为 taosBenchmark 随机产生,可以配置为 "rand" 和 "sample"。为 "sample" 时使用 sample_file 参数指定的文件内的数据。
+
+- **insert_mode** : 插入模式,可选项有 taosc, rest, stmt, sml, sml-rest,分别对应普通写入、restful 接口写入、参数绑定接口写入、schemaless 接口写入、restful schemaless 接口写入(由 taosAdapter 提供)。默认值为 taosc。
+
+- **non_stop_mode** : 指定是否持续写入,若为 "yes" 则 insert_rows 失效,直到 Ctrl + C 停止程序,写入才会停止。默认值为 "no",即写入指定数量的记录后停止。注:即使在持续写入模式下 insert_rows 失效,它也必须被配置为一个非零正整数。
+
+- **line_protocol** : 使用行协议插入数据,仅当 insert_mode 为 sml 或 sml-rest 时生效,可选项为 line, telnet, json。
+
+- **tcp_transfer** : telnet 模式下的通信协议,仅当 insert_mode 为 sml-rest 并且 line_protocol 为 telnet 时生效。如果不配置,则默认为 http 协议。
+
+- **insert_rows** : 每个子表插入的记录数,默认为 0。
+
+- **childtable_offset** : 仅当 childtable_exists 为 yes 时生效,指定从超级表获取子表列表时的偏移量,即从第几个子表开始。
+
+- **childtable_limit** : 仅当 childtable_exists 为 yes 时生效,指定从超级表获取子表列表的上限。
+
+- **interlace_rows** : 启用交错插入模式并同时指定向每个子表每次插入的数据行数。交错插入模式是指依次向每张子表插入由本参数所指定的行数并重复这个过程,直到所有子表的数据都插入完成。默认值为 0,即向一张子表完成数据插入后才会向下一张子表进行数据插入。
+
+- **insert_interval** : 指定交错插入模式的插入间隔,单位为 ms,默认值为 0。只有当 `-B/--interlace-rows` 大于 0 时才起作用。意味着数据插入线程在为每个子表插入隔行扫描记录后,会等待该值指定的时间间隔后再进行下一轮写入。
+
+- **partial_col_num** : 若该值为正数 n,则仅向前 n 列写入,仅当 insert_mode 为 taosc 和 rest 时生效,如果 n 为 0 则是向全部列写入。
+
+- **disorder_ratio** : 指定乱序数据的百分比概率,其值域为 [0,50]。默认为 0,即没有乱序数据。
+
+- **disorder_range** : 指定乱序数据的时间戳回退范围。所生成的乱序时间戳为非乱序情况下应该使用的时间戳减去这个范围内的一个随机值。仅在 `-O/--disorder` 指定的乱序数据百分比大于 0 时有效。
+
+- **timestamp_step** : 每个子表中插入数据的时间戳步长,单位与数据库的 `precision` 一致,默认值是 1。
+
+- **start_timestamp** : 每个子表的时间戳起始值,默认值是 now。
+
+- **sample_format** : 样本数据文件的类型,现在只支持 "csv"。
+
+- **sample_file** : 指定 csv 格式的文件作为数据源,仅当 data_source 为 sample 时生效。若 csv 文件内的数据行数小于等于 prepared_rand,那么会循环读取 csv 文件数据直到与 prepared_rand 相同;否则只会读取 prepared_rand 行数据。也即最终生成的数据行数为二者取小。
+
+- **use_sample_ts** : 仅当 data_source 为 sample 时生效,表示 sample_file 指定的 csv 文件内是否包含第一列时间戳,默认为 no。若设置为 yes,则使用 csv 文件第一列作为时间戳;由于同一子表时间戳不能重复,生成的数据量与 csv 文件内的数据行数相同,此时 insert_rows 失效。
+
+- **tags_file** : 仅当 insert_mode 为 taosc, rest 的模式下生效。最终的 tag 的数值与 childtable_count 有关:如果 csv 文件内的 tag 数据行小于给定的子表数量,那么会循环读取 csv 文件数据直到生成 childtable_count 指定的子表数量;否则只会读取 childtable_count 行 tag 数据。也即最终生成的子表数量为二者取小。
+
+#### tsma 配置参数
+
+指定 tsma 的配置参数在 `super_tables` 中的 `tsmas` 中,具体参数如下。
+
+- **name** : 指定 tsma 的名字,必选项。
+
+- **function** : 指定 tsma 的函数,必选项。
+
+- **interval** : 指定 tsma 的时间间隔,必选项。
+
+- **sliding** : 指定 tsma 的窗口时间位移,必选项。
+
+- **custom** : 指定 tsma 的创建语句结尾追加的自定义配置,可选项。
+
+- **start_when_inserted** : 指定当插入多少行时创建 tsma,可选项,默认为 0。
+
+#### 标签列与数据列配置参数
+
+指定超级表标签列与数据列的配置参数分别在 `super_tables` 中的 `columns` 和 `tag` 中。
+
+- **type** : 指定列类型,可选值请参考 TDengine 支持的数据类型。
+    注:JSON 数据类型比较特殊,只能用于标签,当使用 JSON 类型作为 tag 时有且只能有这一个标签,此时 count 和 len 代表的意义分别是 JSON tag 内的 key-value pair 的个数和每个 KV pair 的 value 的值的长度,value 默认为 string。
+
+- **len** : 指定该数据类型的长度,对 NCHAR,BINARY 和 JSON 数据类型有效。如果对其他数据类型配置了该参数,若为 0,则代表该列始终都是以 null 值写入;如果不为 0 则被忽略。
+
+- **count** : 指定该类型列连续出现的数量,例如 "count": 4096 即可生成 4096 个指定类型的列。
+
+- **name** : 列的名字,若与 count 同时使用,比如 "name":"current", "count":3,则 3 个列的名字分别为 current、current_2、current_3。
+
+- **min** : 数据类型的 列/标签 的最小值。生成的值将大于或等于最小值。
+
+- **max** : 数据类型的 列/标签 的最大值。生成的值将小于最大值。
+
+- **values** : nchar/binary 列/标签的值域,将从值中随机选择。
+
+- **sma**: 将该列加入 SMA 中,值为 "yes" 或者 "no",默认为 "no"。
+
+#### 插入行为配置参数
+
+- **thread_count** : 插入数据的线程数量,默认为 8。
+
+- **create_table_thread_count** : 建表的线程数量,默认为 8。
+
+- **connection_pool_size** : 预先建立的与 TDengine 服务端之间的连接的数量。若不配置,则与所指定的线程数相同。
+
+- **result_file** : 结果输出文件的路径,默认值为 ./output.txt。
+
+- **confirm_parameter_prompt** : 开关参数,要求用户在提示后确认才能继续。默认值为 false。
+
+- **interlace_rows** : 启用交错插入模式并同时指定向每个子表每次插入的数据行数。交错插入模式是指依次向每张子表插入由本参数所指定的行数并重复这个过程,直到所有子表的数据都插入完成。默认值为 0,即向一张子表完成数据插入后才会向下一张子表进行数据插入。
+    在 `super_tables` 中也可以配置该参数,若配置则以 `super_tables` 中的配置为高优先级,覆盖全局设置。
+
+- **insert_interval** :
+    指定交错插入模式的插入间隔,单位为 ms,默认值为 0。只有当 `-B/--interlace-rows` 大于 0 时才起作用。意味着数据插入线程在为每个子表插入隔行扫描记录后,会等待该值指定的时间间隔后再进行下一轮写入。
+    在 `super_tables` 中也可以配置该参数,若配置则以 `super_tables` 中的配置为高优先级,覆盖全局设置。
+
+- **num_of_records_per_req** :
+    每次向 TDengine 请求写入的数据行数,默认值为 30000。当其设置过大时,TDengine 客户端驱动会返回相应的错误信息,此时需要调低这个参数的设置以满足写入要求。
+
+- **prepare_rand** : 生成的随机数据中唯一值的数量。若为 1 则表示所有数据都相同。默认值为 10000。
+
+### 查询场景配置参数
+
+查询场景下 `filetype` 必须设置为 `query`。
+
+查询场景可以通过设置 `kill_slow_query_threshold` 和 `kill_slow_query_interval` 参数来控制杀掉慢查询语句的执行:threshold 控制阈值,exec_usec 超过指定时间的查询将被 taosBenchmark 杀掉,单位为秒;interval 控制两次检查之间的休眠时间,避免持续检查慢查询消耗 CPU,单位为秒。
+
+其它通用参数详见[通用配置参数](#通用配置参数)。
+
+#### 执行指定查询语句的配置参数
+
+查询子表或者普通表的配置参数在 `specified_table_query` 中设置。
+
+- **query_interval** : 查询时间间隔,单位是秒,默认值为 0。
+
+- **threads** : 执行查询 SQL 的线程数,默认值为 1。
+
+- **sqls**:
+    - **sql**: 执行的 SQL 命令,必填。
+    - **result**: 保存查询结果的文件,未指定则不保存。
+
+#### 查询超级表的配置参数
+
+查询超级表的配置参数在 `super_table_query` 中设置。
+
+- **stblname** : 指定要查询的超级表的名称,必填。
+
+- **query_interval** : 查询时间间隔,单位是秒,默认值为 0。
+
+- **threads** : 执行查询 SQL 的线程数,默认值为 1。
+
+- **sqls** :
+    - **sql** : 执行的 SQL 命令,必填;对于超级表的查询 SQL,在 SQL 命令中保留 "xxxx",程序会自动将其替换为超级表中所有的子表名。
+    - **result** : 保存查询结果的文件,未指定则不保存。
+
+### 订阅场景配置参数
+
+订阅场景下 `filetype` 必须设置为 `subscribe`,该参数及其它通用参数详见[通用配置参数](#通用配置参数)。
+
+#### 执行指定订阅语句的配置参数
+
+订阅子表或者普通表的配置参数在 `specified_table_query` 中设置。
+
+- **threads/concurrent** : 执行 SQL 的线程数,默认为 1。
+
+- **sqls** :
+    - **sql** : 执行的 SQL 命令,必填。
diff --git a/docs/zh/19-tools/06-taosdump.md b/docs/zh/19-tools/06-taosdump.md
new file mode 100644
index 0000000000000000000000000000000000000000..5f45c0468fe33e83a707936cb66de1d8bf409c21
--- /dev/null
+++ b/docs/zh/19-tools/06-taosdump.md
@@ -0,0 +1,135 @@
+---
+title: taosdump
+description: "taosdump 是一个支持从运行中的 TDengine 集群备份数据并将备份的数据恢复到相同或另一个运行中的 TDengine 集群中的工具应用程序"
+---
+
+## 简介
+
+taosdump 是一个支持从运行中的 TDengine 集群备份数据并将备份的数据恢复到相同或另一个运行中的 TDengine 集群中的工具应用程序。
+
+taosdump 可以用数据库、超级表或普通表作为逻辑数据单元进行备份,也可以对数据库、超级表和普通表中指定时间段内的数据记录进行备份。使用时可以指定数据备份的目录路径,如果不指定位置,taosdump 默认会将数据备份到当前目录。
+
+如果指定的位置已经有数据文件,taosdump 会提示用户并立即退出,避免数据被覆盖。这意味着同一路径只能被用于一次备份。如果看到相关提示,请小心操作。
+
+taosdump 是一个逻辑备份工具,它不应被用于备份任何原始数据、环境设置、硬件信息、服务端配置或集群的拓扑结构。taosdump 使用 [Apache AVRO](https://avro.apache.org/) 作为数据文件格式来存储备份数据。
+
+## 安装
+
+taosdump 有两种安装方式:
+
+- 安装 taosTools 官方安装包,请从[发布历史页面](https://docs.taosdata.com/releases/tools/)找到 taosTools 并下载安装。
+
+- 单独编译 taos-tools 并安装,详情请参考 [taos-tools](https://github.com/taosdata/taos-tools) 仓库。
+
+运行下面的命令来设置 TDengine Cloud 的 DSN 环境变量:
+
+```bash
+export TDENGINE_CLOUD_DSN=""
+```
+
+
+:::note
+获取真实的 `DSN` 的值,请登录 [TDengine Cloud](https://cloud.taosdata.com) 后点击左边的“工具”菜单,然后选择“taosdump”。
+:::
+
+
+## 常用使用场景
+
+### taosdump 备份数据
+
+1. 备份所有数据库:指定 `-A` 或 `--all-databases` 参数;
+2. 备份多个指定数据库:使用 `-D db1,db2,...` 参数;
+3. 
备份指定数据库中的某些超级表或普通表:使用 `dbname stbname1 stbname2 tbname1 tbname2 ...` 参数,注意这种输入序列第一个参数为数据库名称,且只支持一个数据库,第二个和之后的参数为该数据库中的超级表或普通表名称,中间以空格分隔; +4. 备份系统 log 库:TDengine 集群通常会包含一个系统数据库,名为 `log`,这个数据库内的数据为 TDengine 自我运行的数据,taosdump 默认不会对 log 库进行备份。如果有特定需求对 log 库进行备份,可以使用 `-a` 或 `--allow-sys` 命令行参数。 +5. “宽容”模式备份:taosdump 1.4.1 之后的版本提供 `-n` 参数和 `-L` 参数,用于备份数据时不使用转义字符和“宽容”模式,可以在表名、列名、标签名没使用转义字符的情况下减少备份数据时间和备份数据占用空间。如果不确定符合使用 `-n` 和 `-L` 条件时请使用默认参数进行“严格”模式进行备份。转义字符的说明请参考[官方文档](/taos-sql/escape)。 + +:::tip +- taosdump 1.4.1 之后的版本提供 `-I` 参数,用于解析 avro 文件 schema 和数据,如果指定 `-s` 参数将只解析 schema。 +- taosdump 1.4.2 之后的备份使用 `-B` 参数指定的批次数,默认值为 16384,如果在某些环境下由于网络速度或磁盘性能不足导致 "Error actual dump .. batch .." 可以通过 `-B` 参数调整为更小的值进行尝试。 +- taosdump 的导出不支持中断恢复,所以当进程意外终止后,正确的处理方式是删除当前已导出或生成的所有相关文件。 +- taosdump 的导入支持中断恢复,但是当进程重新启动时,会收到一些“表已经存在”的提示,可以忽视。 + +::: + +### taosdump 恢复数据 + +恢复指定路径下的数据文件:使用 `-i` 参数加上数据文件所在路径。如前面提及,不应该使用同一个目录备份不同数据集合,也不应该在同一路径多次备份同一数据集,否则备份数据会造成覆盖或多次备份。 + +:::tip +taosdump 内部使用 TDengine stmt binding API 进行恢复数据的写入,为提高数据恢复性能,目前使用 16384 为一次写入批次。如果备份数据中有比较多列数据,可能会导致产生 "WAL size exceeds limit" 错误,此时可以通过使用 `-B` 参数调整为一个更小的值进行尝试。 + +::: + +## 详细命令行参数列表 + +以下为 taosdump 详细命令行参数列表: + +``` +Usage: taosdump [OPTION...] dbname [tbname ...] + or: taosdump [OPTION...] --databases db1,db2,... + or: taosdump [OPTION...] --all-databases + or: taosdump [OPTION...] -i inpath + or: taosdump [OPTION...] -o outpath + + -h, --host=HOST Server host dumping data from. Default is + localhost. + -p, --password User password to connect to server. Default is + taosdata. + -P, --port=PORT Port to connect + -u, --user=USER User name used to connect to server. Default is + root. + -c, --config-dir=CONFIG_DIR Configure directory. Default is /etc/taos + -i, --inpath=INPATH Input file path. + -o, --outpath=OUTPATH Output file path. + -r, --resultFile=RESULTFILE DumpOut/In Result file path and name. + -a, --allow-sys Allow to dump system database + -A, --all-databases Dump all databases. + -D, --databases=DATABASES Dump inputted databases. Use comma to separate + databases' name. + -N, --without-property Dump database without its properties. + -s, --schemaonly Only dump tables' schema. + -y, --answer-yes Input yes for prompt. It will skip data file + checking! + -d, --avro-codec=snappy Choose an avro codec among null, deflate, snappy, + and lzma. + -S, --start-time=START_TIME Start time to dump. Either epoch or + ISO8601/RFC3339 format is acceptable. ISO8601 + format example: 2017-10-01T00:00:00.000+0800 or + 2017-10-0100:00:00:000+0800 or '2017-10-01 + 00:00:00.000+0800' + -E, --end-time=END_TIME End time to dump. Either epoch or ISO8601/RFC3339 + format is acceptable. ISO8601 format example: + 2017-10-01T00:00:00.000+0800 or + 2017-10-0100:00:00.000+0800 or '2017-10-01 + 00:00:00.000+0800' + -B, --data-batch=DATA_BATCH Number of data per query/insert statement when + backup/restore. Default value is 16384. If you see + 'error actual dump .. batch ..' when backup or if + you see 'WAL size exceeds limit' error when + restore, please adjust the value to a smaller one + and try. The workable value is related to the + length of the row and type of table schema. + -I, --inspect inspect avro file content and print on screen + -L, --loose-mode Using loose mode if the table name and column name + use letter and number only. Default is NOT. + -n, --no-escape No escape char '`'. Default is using it. + -T, --thread-num=THREAD_NUM Number of thread for dump in file. Default is + 8. 
+  -C, --cloud=CLOUD_DSN      specify a DSN to access TDengine cloud service
+  -R, --restful              Use RESTful interface to connect TDengine
+  -t, --timeout=SECONDS      The timeout seconds for websocket to interact.
+  -g, --debug                Print debug info.
+  -?, --help                 Give this help list
+      --usage                Give a short usage message
+  -V, --version              Print program version
+
+Mandatory or optional arguments to long options are also mandatory or optional
+for any corresponding short options.
+
+Report bugs to .
+```
diff --git a/docs/zh/19-tools/index.md b/docs/zh/19-tools/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..4712476326fc67e642e36f1ab79fae91fe4c14a2
--- /dev/null
+++ b/docs/zh/19-tools/index.md
@@ -0,0 +1,7 @@
+---
+sidebar_label: 工具
+title: 工具
+description: TDengine 提供的有用工具
+---
+
+这一章主要介绍 TDengine 为用户提供的各种实用工具,帮助用户更好地使用 TDengine。
diff --git a/docs/zh/22-user-management/01-orgs/images/neworg.webp b/docs/zh/22-user-management/01-orgs/images/neworg.webp
new file mode 100644
index 0000000000000000000000000000000000000000..6538c15f1caa17cd89cb017cf3839229e03f572a
Binary files /dev/null and b/docs/zh/22-user-management/01-orgs/images/neworg.webp differ
diff --git a/docs/zh/22-user-management/01-orgs/images/orglist.webp b/docs/zh/22-user-management/01-orgs/images/orglist.webp
new file mode 100644
index 0000000000000000000000000000000000000000..4b13f6b45b241bb5a0c9d0f1ae21265de24dc711
Binary files /dev/null and b/docs/zh/22-user-management/01-orgs/images/orglist.webp differ
diff --git a/docs/zh/22-user-management/01-orgs/images/orgname.webp b/docs/zh/22-user-management/01-orgs/images/orgname.webp
new file mode 100644
index 0000000000000000000000000000000000000000..7572e30d8cef03c3d2bd080b99aad4887c09c4a5
Binary files /dev/null and b/docs/zh/22-user-management/01-orgs/images/orgname.webp differ
diff --git a/docs/zh/22-user-management/01-orgs/images/orgprivileges.webp b/docs/zh/22-user-management/01-orgs/images/orgprivileges.webp
new file mode 100644
index 0000000000000000000000000000000000000000..86d15769394e024a1fba054b79556f1875d09b6b
Binary files /dev/null and b/docs/zh/22-user-management/01-orgs/images/orgprivileges.webp differ
diff --git a/docs/zh/22-user-management/01-orgs/images/orgtransfer.webp b/docs/zh/22-user-management/01-orgs/images/orgtransfer.webp
new file mode 100644
index 0000000000000000000000000000000000000000..c8fb9d1cda3a4c1fb43e7084b4e67c30e68da577
Binary files /dev/null and b/docs/zh/22-user-management/01-orgs/images/orgtransfer.webp differ
diff --git a/docs/zh/22-user-management/01-orgs/index.md b/docs/zh/22-user-management/01-orgs/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..8d1d8ef12205f3e9da1cc62d31eca01490729392
--- /dev/null
+++ b/docs/zh/22-user-management/01-orgs/index.md
@@ -0,0 +1,33 @@
+---
+sidebar_label: 组织
+title: 组织管理
+description: '组织管理'
+---
+
+TDengine Cloud 为用户提供了一个列表页来管理您的组织。在这个页面上,您可以看到自己有权查看或编辑的组织。在组织列表的每一行,您可以看到组织的名称、您在该组织中的角色以及可以进行的操作。
+
+![组织列表](./images/orglist.webp)
+
+在**角色**列中,点击某一行的角色值,将弹出您在当前组织中所分配角色的详细信息。同样,在**权限**列中,点击某一行的权限值,也会弹出您在组织层面拥有的权限的详细信息。
+
+![组织权限](./images/orgprivileges.webp)
+
+## 添加新组织
+
+您可以点击组织列表右上方的**添加新组织**按钮,打开**添加新组织**对话框。在打开的对话框中,输入组织名称,然后点击**创建**按钮,即可创建一个新的组织。
+
+![新组织](./images/neworg.webp)
+
+## 修改组织名称
+
+您可以点击每个组织的**操作**列的**编辑**图标,打开**编辑组织**对话框。在打开的对话框中,修改组织名称,然后点击**更改**按钮,保存修改后的组织名称。
+
+![修改组织名称](./images/orgname.webp)
+
+## 转移组织所有者
+
+如果您在左上角的组织选择框中选择了某个组织,并且您是该组织的所有者,组织列表中对应的组织行将显示**转移组织所有者**操作图标。点击该图标将打开**转移组织所有者**对话框。在打开的对话框中,您可以选择该组织的另一个活跃用户;点击确定以后,被选中的用户将收到一封关于转移操作的通知邮件。
+
+![转移组织所有者](./images/orgtransfer.webp)
diff --git a/docs/zh/22-user-management/02-users/01-users.md b/docs/zh/22-user-management/02-users/01-users.md
new file mode 100644
index 0000000000000000000000000000000000000000..fbac96182da8f52a73b811b2a59bf5bb4352f4fb
--- /dev/null
+++ b/docs/zh/22-user-management/02-users/01-users.md
@@ -0,0 +1,11 @@
+---
+sidebar_label: 用户
+title: 用户管理
+description: '用户管理包括添加新用户、删除用户和为用户分配特定角色和资源。'
+---
+
+在**用户**标签页,TDengine Cloud 列出了当前组织中的所有用户。您可以添加新的用户,也可以禁用或删除特定的用户。您还可以为用户分配或取消组织内任何资源(包括组织、实例或数据库)上的角色。如果您把组织级别的角色**实例管理员**分配给某个用户,该用户将成为您所有实例的管理员。
+
+## 添加新用户
+
+您可以点击用户列表右上方的**添加新用户**按钮,打开**添加新用户**对话框。在打开的对话框中,您先要输入新用户的电子邮件,还可以选择现有用户作为权限模板,这样所选用户的权限将被复制到对话框**角色**部分的新用户身上。如果您选择了多个用户组,新用户在被激活后将被添加到这些用户组。在最后的**角色**部分,您可以勾选或取消勾选特定资源(包括组织、实例和数据库)的角色。最后,您可以点击**添加**按钮,在输入邮件通知中的验证码后提交请求,新用户将被添加到用户列表中。
diff --git a/docs/zh/22-user-management/02-users/index.md b/docs/zh/22-user-management/02-users/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..e5d39ba2c3765b2aa69bc2fbd1a0db9624ac3005
--- /dev/null
+++ b/docs/zh/22-user-management/02-users/index.md
@@ -0,0 +1,14 @@
+---
+sidebar_label: 用户管理
+title: 用户管理
+description: '用户管理,包括用户、用户组和角色'
+---
+
+在用户管理页面,您可以向选定的组织添加新的用户、用户组、角色,也可以给指定的用户分配一些资源的角色,如实例或数据库。您还可以编辑或删除指定的用户、用户组、角色或权限。
+
+```mdx-code-block
+import DocCardList from '@theme/DocCardList';
+import {useCurrentSidebarCategory} from '@docusaurus/theme-common';
+
+
+```
diff --git a/docs/zh/22-user-management/index.md b/docs/zh/22-user-management/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..ab1e5d52c65d5be1e8a89b780e9e9b98f1b14936
--- /dev/null
+++ b/docs/zh/22-user-management/index.md
@@ -0,0 +1,28 @@
+---
+sidebar_label: 用户管理
+title: 用户管理
+description: 使用用户管理来管理组织、用户、用户组、角色、权限和资源。
+---
+
+TDengine Cloud 提供了简单易用的用户管理功能,包括用户、用户组、角色、权限和资源。TDengine Cloud 默认提供了 4 个级别的权限和 8 种默认角色:权限级别包括组织级别、实例级别、数据库级别和权限管理级别,默认角色是数据库管理员、账户、数据生产者、数据消费者、开发人员、实例管理员、超级管理员和组织所有者。您还可以用 TDengine Cloud 已经定义的权限创建自定义角色。
+
+组织所有者或超级管理员可以邀请任何其他用户加入组织,还可以给用户、用户组分配指定资源(包括实例和数据库等)上的特定角色。在组织级别、实例级别或数据库级别,TDengine 数据所有者通过分配角色(包括数据库读权限)可以很方便地与他人分享数据,并且您还可以很容易地更新或删除这些权限。比如,您可以非常容易地为整个实例授予开发人员权限,让内部团队快速获得充分的访问权,同时只授予特定相关人员对特定数据库资源的有限访问权。
+
+您可以创建多个组织、用户和用户组,并在特定的组织、实例或者数据库中添加用户和用户组。
+
+本节介绍了主要的功能和典型的使用案例,以便您对整个用户管理有更深入的了解。
+
+## 主要功能
+
+主要功能如下所示:
+
+1. [组织管理](./orgs/): 创建新的组织,更新它们的名字,也可以将所有者身份转移给组织中的其他用户。
+2. 
[用户管理](./users/): 创建、更新或删除用户或者用户组。您还可以创建/编辑/删除自定义角色。 + - [用户](./users/users) + +```mdx-code-block +import DocCardList from '@theme/DocCardList'; +import {useCurrentSidebarCategory} from '@docusaurus/theme-common'; + + +``` diff --git a/docs/zh/eco_system.webp b/docs/zh/eco_system.webp new file mode 100644 index 0000000000000000000000000000000000000000..1bc754db97e4bc976805ad41050d642cb3c424a7 Binary files /dev/null and b/docs/zh/eco_system.webp differ diff --git a/examples/c/tmq.c b/examples/c/tmq.c new file mode 100644 index 0000000000000000000000000000000000000000..eb41ad039a1852bb265165837d69edc3a2835684 --- /dev/null +++ b/examples/c/tmq.c @@ -0,0 +1,282 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include "taos.h" + +static int running = 1; + +static int32_t msg_process(TAOS_RES* msg) { + char buf[1024]; + int32_t rows = 0; + + const char* topicName = tmq_get_topic_name(msg); + const char* dbName = tmq_get_db_name(msg); + int32_t vgroupId = tmq_get_vgroup_id(msg); + + printf("topic: %s\n", topicName); + printf("db: %s\n", dbName); + printf("vgroup id: %d\n", vgroupId); + + while (1) { + TAOS_ROW row = taos_fetch_row(msg); + if (row == NULL) break; + + TAOS_FIELD* fields = taos_fetch_fields(msg); + int32_t numOfFields = taos_field_count(msg); + // int32_t* length = taos_fetch_lengths(msg); + int32_t precision = taos_result_precision(msg); + rows++; + taos_print_row(buf, row, fields, numOfFields); + printf("precision: %d, row content: %s\n", precision, buf); + } + + return rows; +} + +static int32_t init_env() { + TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); + if (pConn == NULL) { + return -1; + } + + TAOS_RES* pRes; + // drop database if exists + printf("create database\n"); + pRes = taos_query(pConn, "drop topic topicname"); + if (taos_errno(pRes) != 0) { + printf("error in drop tmqdb, reason:%s\n", taos_errstr(pRes)); + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "drop database if exists tmqdb"); + if (taos_errno(pRes) != 0) { + printf("error in drop tmqdb, reason:%s\n", taos_errstr(pRes)); + } + taos_free_result(pRes); + + // create database + pRes = taos_query(pConn, "create database tmqdb precision 'ns'"); + if (taos_errno(pRes) != 0) { + printf("error in create tmqdb, reason:%s\n", taos_errstr(pRes)); + goto END; + } + taos_free_result(pRes); + + // create super table + printf("create super table\n"); + pRes = taos_query( + pConn, "create table tmqdb.stb (ts timestamp, c1 int, c2 float, c3 varchar(16)) tags(t1 int, t3 varchar(16))"); + if (taos_errno(pRes) != 0) { + printf("failed to create super table stb, reason:%s\n", taos_errstr(pRes)); + goto END; + } + taos_free_result(pRes); + + // create sub tables + printf("create sub tables\n"); + pRes = taos_query(pConn, "create table tmqdb.ctb0 using tmqdb.stb tags(0, 'subtable0')"); + if (taos_errno(pRes) != 0) { + printf("failed to create super table ctb0, reason:%s\n", taos_errstr(pRes)); + goto END; + 
} + taos_free_result(pRes); + + pRes = taos_query(pConn, "create table tmqdb.ctb1 using tmqdb.stb tags(1, 'subtable1')"); + if (taos_errno(pRes) != 0) { + printf("failed to create super table ctb1, reason:%s\n", taos_errstr(pRes)); + goto END; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "create table tmqdb.ctb2 using tmqdb.stb tags(2, 'subtable2')"); + if (taos_errno(pRes) != 0) { + printf("failed to create super table ctb2, reason:%s\n", taos_errstr(pRes)); + goto END; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "create table tmqdb.ctb3 using tmqdb.stb tags(3, 'subtable3')"); + if (taos_errno(pRes) != 0) { + printf("failed to create super table ctb3, reason:%s\n", taos_errstr(pRes)); + goto END; + } + taos_free_result(pRes); + + // insert data + printf("insert data into sub tables\n"); + pRes = taos_query(pConn, "insert into tmqdb.ctb0 values(now, 0, 0, 'a0')(now+1s, 0, 0, 'a00')"); + if (taos_errno(pRes) != 0) { + printf("failed to insert into ctb0, reason:%s\n", taos_errstr(pRes)); + goto END; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "insert into tmqdb.ctb1 values(now, 1, 1, 'a1')(now+1s, 11, 11, 'a11')"); + if (taos_errno(pRes) != 0) { + printf("failed to insert into ctb0, reason:%s\n", taos_errstr(pRes)); + goto END; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "insert into tmqdb.ctb2 values(now, 2, 2, 'a1')(now+1s, 22, 22, 'a22')"); + if (taos_errno(pRes) != 0) { + printf("failed to insert into ctb0, reason:%s\n", taos_errstr(pRes)); + goto END; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "insert into tmqdb.ctb3 values(now, 3, 3, 'a1')(now+1s, 33, 33, 'a33')"); + if (taos_errno(pRes) != 0) { + printf("failed to insert into ctb0, reason:%s\n", taos_errstr(pRes)); + goto END; + } + taos_free_result(pRes); + taos_close(pConn); + return 0; + +END: + taos_free_result(pRes); + taos_close(pConn); + return -1; +} + +int32_t create_topic() { + printf("create topic\n"); + TAOS_RES* pRes; + TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); + if (pConn == NULL) { + return -1; + } + + pRes = taos_query(pConn, "use tmqdb"); + if (taos_errno(pRes) != 0) { + printf("error in use tmqdb, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "create topic topicname as select ts, c1, c2, c3, tbname from tmqdb.stb where c1 > 1"); + if (taos_errno(pRes) != 0) { + printf("failed to create topic topicname, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + taos_close(pConn); + return 0; +} + +void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) { + printf("tmq_commit_cb_print() code: %d, tmq: %p, param: %p\n", code, tmq, param); +} + +tmq_t* build_consumer() { + tmq_conf_res_t code; + tmq_conf_t* conf = tmq_conf_new(); + code = tmq_conf_set(conf, "enable.auto.commit", "true"); + if (TMQ_CONF_OK != code) return NULL; + code = tmq_conf_set(conf, "auto.commit.interval.ms", "1000"); + if (TMQ_CONF_OK != code) return NULL; + code = tmq_conf_set(conf, "group.id", "cgrpName"); + if (TMQ_CONF_OK != code) return NULL; + code = tmq_conf_set(conf, "client.id", "user defined name"); + if (TMQ_CONF_OK != code) return NULL; + code = tmq_conf_set(conf, "td.connect.user", "root"); + if (TMQ_CONF_OK != code) return NULL; + code = tmq_conf_set(conf, "td.connect.pass", "taosdata"); + if (TMQ_CONF_OK != code) return NULL; + code = tmq_conf_set(conf, "auto.offset.reset", "earliest"); + if (TMQ_CONF_OK != code) return NULL; + code = 
tmq_conf_set(conf, "experimental.snapshot.enable", "false"); + if (TMQ_CONF_OK != code) return NULL; + + tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL); + + tmq_t* tmq = tmq_consumer_new(conf, NULL, 0); + tmq_conf_destroy(conf); + return tmq; +} + +tmq_list_t* build_topic_list() { + tmq_list_t* topicList = tmq_list_new(); + int32_t code = tmq_list_append(topicList, "topicname"); + if (code) { + tmq_list_destroy(topicList); + return NULL; + } + return topicList; +} + +void basic_consume_loop(tmq_t* tmq) { + int32_t totalRows = 0; + int32_t msgCnt = 0; + int32_t timeout = 5000; + while (running) { + TAOS_RES* tmqmsg = tmq_consumer_poll(tmq, timeout); + if (tmqmsg) { + msgCnt++; + totalRows += msg_process(tmqmsg); + taos_free_result(tmqmsg); + } else { + break; + } + } + + fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows); +} + +int main(int argc, char* argv[]) { + int32_t code; + + if (init_env() < 0) { + return -1; + } + + if (create_topic() < 0) { + return -1; + } + + tmq_t* tmq = build_consumer(); + if (NULL == tmq) { + fprintf(stderr, "build_consumer() fail!\n"); + return -1; + } + + tmq_list_t* topic_list = build_topic_list(); + if (NULL == topic_list) { + return -1; + } + + if ((code = tmq_subscribe(tmq, topic_list))) { + fprintf(stderr, "Failed to tmq_subscribe(): %s\n", tmq_err2str(code)); + } + tmq_list_destroy(topic_list); + + basic_consume_loop(tmq); + + code = tmq_consumer_close(tmq); + if (code) { + fprintf(stderr, "Failed to close consumer: %s\n", tmq_err2str(code)); + } else { + fprintf(stderr, "Consumer closed\n"); + } + + return 0; +} diff --git a/tests/script/sh/abs_max.c b/tests/script/sh/abs_max.c deleted file mode 100644 index 9faea60374766e47f82a70880fe2b1376bbc12dc..0000000000000000000000000000000000000000 --- a/tests/script/sh/abs_max.c +++ /dev/null @@ -1,95 +0,0 @@ -#include -#include -#include -#include - -typedef struct SUdfInit{ - int maybe_null; /* 1 if function can return NULL */ - int decimals; /* for real functions */ - int64_t length; /* For string functions */ - char *ptr; /* free pointer for function data */ - int const_item; /* 0 if result is independent of arguments */ -} SUdfInit; - - -#define TSDB_DATA_INT_NULL 0x80000000L -#define TSDB_DATA_BIGINT_NULL 0x8000000000000000L - -void abs_max(char* data, short itype, short ibytes, int numOfRows, int64_t* ts, char* dataOutput, char* interBuf, char* tsOutput, - int* numOfOutput, short otype, short obytes, SUdfInit* buf) { - int i; - int64_t r = 0; - // printf("abs_max input data:%p, type:%d, rows:%d, ts:%p, %" PRId64 ", dataoutput:%p, tsOutput:%p, numOfOutput:%p, buf:%p\n", data, itype, numOfRows, ts, *ts, dataOutput, tsOutput, numOfOutput, buf); - if (itype == 5) { - r=*(int64_t *)dataOutput; - *numOfOutput=0; - - for(i=0;i r) { - r = v; - } - } - - *(int64_t *)dataOutput=r; - - // printf("abs_max out, dataoutput:%" PRId64", numOfOutput:%d\n", *(int64_t *)dataOutput, *numOfOutput); - }else { - *numOfOutput=0; - } -} - - - -void abs_max_finalize(char* dataOutput, char* interBuf, int* numOfOutput, SUdfInit* buf) { - int i; - //int64_t r = 0; - // printf("abs_max_finalize dataoutput:%p:%d, numOfOutput:%d, buf:%p\n", dataOutput, *dataOutput, *numOfOutput, buf); - // *numOfOutput=1; - // printf("abs_max finalize, dataoutput:%" PRId64", numOfOutput:%d\n", *(int64_t *)dataOutput, *numOfOutput); -} - -void abs_max_merge(char* data, int32_t numOfRows, char* dataOutput, int32_t* numOfOutput, SUdfInit* buf) { - int64_t r = 0; - - if (numOfRows > 0) { - r = *((int64_t *)data); - } - 
// printf("abs_max_merge numOfRows:%d, dataoutput:%p, buf:%p\n", numOfRows, dataOutput, buf); - for (int i = 1; i < numOfRows; ++i) { - // printf("abs_max_merge %d - %" PRId64"\n", i, *((int64_t *)data + i)); - if (*((int64_t*)data + i) > r) { - r= *((int64_t*)data + i); - } - } - - *(int64_t*)dataOutput=r; - if (numOfRows > 0) { - *numOfOutput=1; - } else { - *numOfOutput=0; - } - - // printf("abs_max_merge, dataoutput:%" PRId64", numOfOutput:%d\n", *(int64_t *)dataOutput, *numOfOutput); -} - - -int abs_max_init(SUdfInit* buf) { - // printf("abs_max init\n"); - return 0; -} - - -void abs_max_destroy(SUdfInit* buf) { - // printf("abs_max destroy\n"); -} \ No newline at end of file diff --git a/tests/script/sh/add_one.c b/tests/script/sh/add_one.c deleted file mode 100644 index de2a7d168c88d1a13bf5cbe6a327ea451ddb2546..0000000000000000000000000000000000000000 --- a/tests/script/sh/add_one.c +++ /dev/null @@ -1,31 +0,0 @@ -#include -#include -#include - -typedef struct SUdfInit{ - int maybe_null; /* 1 if function can return NULL */ - int decimals; /* for real functions */ - long long length; /* For string functions */ - char *ptr; /* free pointer for function data */ - int const_item; /* 0 if result is independent of arguments */ -} SUdfInit; - -void add_one(char* data, short itype, short ibytes, int numOfRows, long long* ts, char* dataOutput, char* interBUf, char* tsOutput, - int* numOfOutput, short otype, short obytes, SUdfInit* buf) { - int i; - int r = 0; - // printf("add_one input data:%p, type:%d, rows:%d, ts:%p,%lld, dataoutput:%p, tsOutput:%p, numOfOutput:%p, buf:%p\n", data, itype, numOfRows, ts, *ts, dataOutput, tsOutput, numOfOutput, buf); - if (itype == 4) { - for(i=0;i -#include -#include - -typedef struct SUdfInit{ - int maybe_null; /* 1 if function can return NULL */ - int decimals; /* for real functions */ - long long length; /* For string functions */ - char *ptr; /* free pointer for function data */ - int const_item; /* 0 if result is independent of arguments */ -} SUdfInit; - -void add_one_64232(char* data, short itype, short ibytes, int numOfRows, long long* ts, char* dataOutput, char* interBUf, char* tsOutput, - int* numOfOutput, short otype, short obytes, SUdfInit* buf) { - int i; - int r = 0; - printf("add_one_64232 input data:%p, type:%d, rows:%d, ts:%p,%lld, dataoutput:%p, tsOutput:%p, numOfOutput:%p, buf:%p\n", data, itype, numOfRows, ts, *ts, dataOutput, tsOutput, numOfOutput, buf); - if (itype == 5) { - for(i=0;i +#include +#include +#include "taosudf.h" + + +DLL_EXPORT int32_t bit_and_init() { + return 0; +} + +DLL_EXPORT int32_t bit_and_destroy() { + return 0; +} + +DLL_EXPORT int32_t bit_and(SUdfDataBlock* block, SUdfColumn *resultCol) { + + if (block->numOfCols < 2) { + return TSDB_CODE_UDF_INVALID_INPUT; + } + + for (int32_t i = 0; i < block->numOfCols; ++i) { + SUdfColumn* col = block->udfCols[i]; + if (!(col->colMeta.type == TSDB_DATA_TYPE_INT)) { + return TSDB_CODE_UDF_INVALID_INPUT; + } + } + + SUdfColumnMeta *meta = &resultCol->colMeta; + meta->bytes = 4; + meta->type = TSDB_DATA_TYPE_INT; + meta->scale = 0; + meta->precision = 0; + + + SUdfColumnData *resultData = &resultCol->colData; + + resultData->numOfRows = block->numOfRows; + + for (int32_t i = 0; i < resultData->numOfRows; ++i) { + if (udfColDataIsNull(block->udfCols[0], i)) { + udfColDataSetNull(resultCol, i); + continue; + } + int32_t result = *(int32_t*)udfColDataGetData(block->udfCols[0], i); + int j = 1; + for (; j < block->numOfCols; ++j) { + if (udfColDataIsNull(block->udfCols[j], i)) { + 
udfColDataSetNull(resultCol, i); + break; + } + + char* colData = udfColDataGetData(block->udfCols[j], i); + result &= *(int32_t*)colData; + } + if (j == block->numOfCols) { + udfColDataSet(resultCol, i, (char*)&result, false); + } + + } + return TSDB_CODE_SUCCESS; +} diff --git a/tests/script/sh/cfg.bat b/tests/script/sh/cfg.bat index 49ab34383a6a6392b40314c912dd2931dbaf7a36..c25bebd159877aa8756465dfe3afdf135d505017 100644 --- a/tests/script/sh/cfg.bat +++ b/tests/script/sh/cfg.bat @@ -16,12 +16,12 @@ rem echo NODE: %NODE% set SCRIPT_DIR=%~dp0..\ rem echo SCRIPT_DIR: %SCRIPT_DIR% -set BUILD_DIR=%SCRIPT_DIR%..\..\..\debug\build\bin\ +echo %cd% | grep community > nul && set "BUILD_DIR=%SCRIPT_DIR%..\..\..\debug\build\bin\" || set "BUILD_DIR=%SCRIPT_DIR%..\..\debug\build\bin\" set TSIM=%BUILD_DIR%tsim rem echo BUILD_DIR: %BUILD_DIR% rem echo TSIM: %TSIM% -set SIM_DIR=%SCRIPT_DIR%..\..\..\sim\ +echo %cd% | grep community > nul && set "SIM_DIR=%SCRIPT_DIR%..\..\..\sim\" || set "SIM_DIR=%SCRIPT_DIR%..\..\sim\" rem echo SIM_DIR: %SIM_DIR% set NODE_DIR=%SIM_DIR%%NODE_NAME%\ diff --git a/tests/script/sh/cfg.sh b/tests/script/sh/cfg.sh index 7d4d747e54bab9ed98b42ab1902f8580e17fca6c..bf58185f6201b5046608881d9c9f1a5b5e8de3ac 100755 --- a/tests/script/sh/cfg.sh +++ b/tests/script/sh/cfg.sh @@ -45,11 +45,7 @@ fi TAOS_DIR=`pwd` TAOSD_DIR=`find . -name "taosd"|grep bin|head -n1` -if [[ "$OS_TYPE" != "Darwin" ]]; then - cut_opt="--field=" -else - cut_opt="-f " -fi +cut_opt="-f " if [[ "$TAOSD_DIR" == *"$IN_TDINTERNAL"* ]]; then BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' ${cut_opt}2,3` diff --git a/tests/script/sh/checkAsan.sh b/tests/script/sh/checkAsan.sh new file mode 100755 index 0000000000000000000000000000000000000000..31fe80829fa693d8fd3bb5a5da8efbd7ed2e4035 --- /dev/null +++ b/tests/script/sh/checkAsan.sh @@ -0,0 +1,74 @@ +#!/bin/bash + +set +e +#set -x +if [[ "$OSTYPE" == "darwin"* ]]; then + TD_OS="Darwin" +else + OS=$(cat /etc/*-release | grep "^NAME=" | cut -d= -f2) + len=$(echo ${#OS}) + len=$((len-2)) + TD_OS=$(echo -ne ${OS:1:${len}} | cut -d" " -f1) +fi + +if [[ "$TD_OS" == "Alpine" ]]; then + echo -e "os is Alpine,skip check Asan" + exit 0 +fi +unset LD_PRELOAD +SCRIPT_DIR=`dirname $0` +cd $SCRIPT_DIR/../ +SCRIPT_DIR=`pwd` + +IN_TDINTERNAL="community" +if [[ "$SCRIPT_DIR" == *"$IN_TDINTERNAL"* ]]; then + cd ../../.. 
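+  # the community checkout is nested inside TDinternal, so the repo root is one level further up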
+else + cd ../../ +fi + +TAOS_DIR=`pwd` +LOG_DIR=$TAOS_DIR/sim/asan + +error_num=`cat ${LOG_DIR}/*.asan | grep "ERROR" | wc -l` +memory_leak=`cat ${LOG_DIR}/*.asan | grep "Direct leak" | wc -l` +indirect_leak=`cat ${LOG_DIR}/*.asan | grep "Indirect leak" | wc -l` +python_error=`cat ${LOG_DIR}/*.info | grep -w "stack" | wc -l` + +# ignore + +# TD-20368 +# /root/TDengine/contrib/zlib/trees.c:873:5: runtime error: null pointer passed as argument 2, which is declared to never be null + +# TD-20494 TD-20452 +# /root/TDengine/source/libs/scalar/src/sclfunc.c:735:11: runtime error: 4.75783e+11 is outside the range of representable values of type 'signed char' +# /root/TDengine/source/libs/scalar/src/sclfunc.c:790:11: runtime error: 3.4e+38 is outside the range of representable values of type 'long int' +# /root/TDengine/source/libs/scalar/src/sclfunc.c:772:11: runtime error: 3.52344e+09 is outside the range of representable values of type 'int' +# /root/TDengine/source/libs/scalar/src/sclfunc.c:753:11: runtime error: 4.75783e+11 is outside the range of representable values of type 'short int' + +# TD-20569 +# /root/TDengine/source/libs/function/src/builtinsimpl.c:856:29: runtime error: signed integer overflow: 9223372036854775806 + 9223372036854775805 cannot be represented in type 'long int' +# /root/TDengine/source/libs/scalar/src/sclvector.c:1075:66: runtime error: signed integer overflow: 9223372034707292160 + 1668838476672 cannot be represented in type 'long int' +# /root/TDengine/source/common/src/tdataformat.c:1876:7: runtime error: signed integer overflow: 8252423483843671206 + 2406154664059062870 cannot be represented in type 'long int' + +runtime_error=`cat ${LOG_DIR}/*.asan | grep "runtime error" | grep -v "trees.c:873" | grep -v "sclfunc.c.*outside the range of representable values of type"| grep -v "signed integer overflow" |grep -v "strerror.c"| grep -v "asan_malloc_linux.cc" |wc -l` + +echo -e "\033[44;32;1m"asan error_num: $error_num"\033[0m" +echo -e "\033[44;32;1m"asan memory_leak: $memory_leak"\033[0m" +echo -e "\033[44;32;1m"asan indirect_leak: $indirect_leak"\033[0m" +echo -e "\033[44;32;1m"asan runtime error: $runtime_error"\033[0m" +echo -e "\033[44;32;1m"asan python error: $python_error"\033[0m" + +let "errors=$error_num+$memory_leak+$indirect_leak+$runtime_error+$python_error" + +if [ $errors -eq 0 ]; then + echo -e "\033[44;32;1m"no asan errors"\033[0m" + exit 0 +else + echo -e "\033[44;31;1m"asan total errors: $errors"\033[0m" + if [ $python_error -ne 0 ]; then + cat ${LOG_DIR}/*.info + fi + cat ${LOG_DIR}/*.asan + exit 1 +fi diff --git a/tests/script/sh/checkValgrind.sh b/tests/script/sh/checkValgrind.sh new file mode 100755 index 0000000000000000000000000000000000000000..fdbac45ea672ee7802dd4ceff9718fac512d2ca9 --- /dev/null +++ b/tests/script/sh/checkValgrind.sh @@ -0,0 +1,59 @@ +#!/bin/bash + +set +e +#set -x + +NODE_NAME= +DETAIL=0 + +while getopts "n:d" arg +do + case $arg in + n) + NODE_NAME=$OPTARG + ;; + d) + DETAIL=1 + ;; + ?) + echo "unkown argument" + ;; + esac +done + +SCRIPT_DIR=`dirname $0` +cd $SCRIPT_DIR/../ +SCRIPT_DIR=`pwd` + +IN_TDINTERNAL="community" +if [[ "$SCRIPT_DIR" == *"$IN_TDINTERNAL"* ]]; then + cd ../../.. 
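+  # the community checkout is nested inside TDinternal, so the repo root is one level further up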
+else + cd ../../ +fi + +TAOS_DIR=`pwd` +LOG_DIR=$TAOS_DIR/sim/$NODE_NAME/log + +error_summary=`cat ${LOG_DIR}/valgrind-taosd-*.log | grep "ERROR SUMMARY:" | awk '{print $4}' | awk '{sum+=$1}END{print sum}'` +still_reachable=`cat ${LOG_DIR}/valgrind-taosd-*.log | grep "still reachable in" | wc -l` +definitely_lost=`cat ${LOG_DIR}/valgrind-taosd-*.log | grep "definitely lost in" | wc -l` +indirectly_lost=`cat ${LOG_DIR}/valgrind-taosd-*.log | grep "indirectly lost in " | wc -l` +possibly_lost=`cat ${LOG_DIR}/valgrind-taosd-*.log | grep "possibly lost in " | wc -l` +invalid_read=`cat ${LOG_DIR}/valgrind-taosd-*.log | grep "Invalid read of " | wc -l` +invalid_write=`cat ${LOG_DIR}/valgrind-taosd-*.log | grep "Invalid write of " | wc -l` +invalid_free=`cat ${LOG_DIR}/valgrind-taosd-*.log | grep "Invalid free() " | wc -l` + +if [ $DETAIL -eq 1 ]; then + echo error_summary: $error_summary + echo still_reachable: $still_reachable + echo definitely_lost: $definitely_lost + echo indirectly_lost: $indirectly_lost + echo possibly_lost: $possibly_lost + echo invalid_read: $invalid_read + echo invalid_write: $invalid_write + echo invalid_free: $invalid_free +fi + +let "errors=$error_summary+$still_reachable+$definitely_lost+$indirectly_lost+$possibly_lost+$invalid_read+$invalid_write+$invalid_free" +echo $errors diff --git a/tests/script/sh/clear.sh b/tests/script/sh/clear.sh index 4ee296cf058370552b14c31b67006830e84847dd..587a33f633fbfac4319cf5f6724a29fa60680a91 100755 --- a/tests/script/sh/clear.sh +++ b/tests/script/sh/clear.sh @@ -48,11 +48,7 @@ fi TAOS_DIR=`pwd` TAOSD_DIR=`find . -name "taosd"|grep bin|head -n1` -if [[ "$OS_TYPE" != "Darwin" ]]; then - cut_opt="--field=" -else - cut_opt="-f " -fi +cut_opt="-f " if [[ "$TAOSD_DIR" == *"$IN_TDINTERNAL"* ]]; then BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' ${cut_opt}2,3` diff --git a/tests/script/sh/compile_udf.sh b/tests/script/sh/compile_udf.sh new file mode 100755 index 0000000000000000000000000000000000000000..5265e5a99b3400ac93702d7ee8a972f5d2893b33 --- /dev/null +++ b/tests/script/sh/compile_udf.sh @@ -0,0 +1,11 @@ +set +e + +rm -rf /tmp/udf/libbitand.so /tmp/udf/libsqrsum.so /tmp/udf/libgpd.so +mkdir -p /tmp/udf +echo "compile udf bit_and and sqr_sum" +gcc -fPIC -shared sh/bit_and.c -I../../include/libs/function/ -I../../include/client -I../../include/util -o /tmp/udf/libbitand.so +gcc -fPIC -shared sh/l2norm.c -I../../include/libs/function/ -I../../include/client -I../../include/util -o /tmp/udf/libl2norm.so +gcc -fPIC -shared sh/gpd.c -I../../include/libs/function/ -I../../include/client -I../../include/util -o /tmp/udf/libgpd.so +echo "debug show /tmp/udf/*.so" +ls /tmp/udf/*.so + diff --git a/tests/script/sh/copy_udf.bat b/tests/script/sh/copy_udf.bat new file mode 100644 index 0000000000000000000000000000000000000000..597b77792e7af001fcd3717dc84de77d835b2a7d --- /dev/null +++ b/tests/script/sh/copy_udf.bat @@ -0,0 +1,23 @@ +@echo off + +echo Executing copy_udf.bat +set SCRIPT_DIR=%cd% +echo SCRIPT_DIR: %SCRIPT_DIR% + +echo %cd% | grep community > nul && cd ..\..\.. || cd ..\.. 
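+rem after the line above the current directory is the repo root: three levels up for community checkouts, two otherwise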
+set TAOS_DIR=%cd% +echo find udf library in %TAOS_DIR% +set UDF1_DIR=%TAOS_DIR%\debug\build\lib\udf1.dll +set UDF2_DIR=%TAOS_DIR%\debug\build\lib\udf2.dll + +echo %UDF1_DIR% +echo %UDF2_DIR% + +set UDF_TMP=C:\Windows\Temp\udf +rm -rf %UDF_TMP% +mkdir %UDF_TMP% + +echo Copy udf shared library files to %UDF_TMP% + +cp %UDF1_DIR% %UDF_TMP% +cp %UDF2_DIR% %UDF_TMP% diff --git a/tests/script/sh/copy_udf.sh b/tests/script/sh/copy_udf.sh new file mode 100755 index 0000000000000000000000000000000000000000..c3c300fb7b6ae33832fc898e9bd987be5ce9f6d0 --- /dev/null +++ b/tests/script/sh/copy_udf.sh @@ -0,0 +1,36 @@ +#!/bin/bash + +set +e +#set -x + +echo "Executing copy_udf.sh" +SCRIPT_DIR=`dirname $0` +cd $SCRIPT_DIR/../ +SCRIPT_DIR=`pwd` +echo "SCRIPT_DIR: ${SCRIPT_DIR}" + +IN_TDINTERNAL="community" +if [[ "$SCRIPT_DIR" == *"$IN_TDINTERNAL"* ]]; then + cd ../../.. +else + cd ../../ +fi + +TAOS_DIR=`pwd` +echo "find udf library in $TAOS_DIR" +UDF1_DIR=`find $TAOS_DIR -name "libudf1.so"|grep lib|head -n1` +UDF2_DIR=`find $TAOS_DIR -name "libudf2.so"|grep lib|head -n1` + +echo $UDF1_DIR +echo $UDF2_DIR + +UDF_TMP=/tmp/udf +rm -rf $UDF_TMP +mkdir $UDF_TMP + +echo "Copy udf shared library files to $UDF_TMP" + +cp $UDF1_DIR $UDF_TMP +echo "copy udf1 result: $?" +cp $UDF2_DIR $UDF_TMP +echo "copy udf2 result: $?" diff --git a/tests/script/sh/demo.c b/tests/script/sh/demo.c deleted file mode 100644 index 23d217444892e5bd8dc1c83b569dc22616c42e78..0000000000000000000000000000000000000000 --- a/tests/script/sh/demo.c +++ /dev/null @@ -1,112 +0,0 @@ -#include -#include -#include - -typedef struct SUdfInit{ - int maybe_null; /* 1 if function can return NULL */ - int decimals; /* for real functions */ - long long length; /* For string functions */ - char *ptr; /* free pointer for function data */ - int const_item; /* 0 if result is independent of arguments */ -} SUdfInit; - -typedef struct SDemo{ - double sum; - int num; - short otype; -}SDemo; - -#define FLOAT_NULL 0x7FF00000 // it is an NAN -#define DOUBLE_NULL 0x7FFFFF0000000000L // it is an NAN - - -void demo(char* data, short itype, short ibytes, int numOfRows, long long* ts, char* dataOutput, char* interBuf, char* tsOutput, - int* numOfOutput, short otype, short obytes, SUdfInit* buf) { - int i; - double r = 0; - SDemo *p = (SDemo *)interBuf; - SDemo *q = (SDemo *)dataOutput; - printf("demo input data:%p, type:%d, rows:%d, ts:%p,%lld, dataoutput:%p, interBUf:%p, tsOutput:%p, numOfOutput:%p, buf:%p\n", data, itype, numOfRows, ts, *ts, dataOutput, interBuf, tsOutput, numOfOutput, buf); - - for(i=0;isum += r*r; - } - - p->otype = otype; - p->num += numOfRows; - - q->sum = p->sum; - q->num = p->num; - q->otype = p->otype; - - *numOfOutput=1; - - printf("demo out, sum:%f, num:%d, numOfOutput:%d\n", p->sum, p->num, *numOfOutput); -} - - -void demo_merge(char* data, int32_t numOfRows, char* dataOutput, int32_t* numOfOutput, SUdfInit* buf) { - int i; - SDemo *p = (SDemo *)data; - SDemo res = {0}; - printf("demo_merge input data:%p, rows:%d, dataoutput:%p, numOfOutput:%p, buf:%p\n", data, numOfRows, dataOutput, numOfOutput, buf); - - for(i=0;isum * p->sum; - res.num += p->num; - p++; - } - - p->sum = res.sum; - p->num = res.num; - - *numOfOutput=1; - - printf("demo out, sum:%f, num:%d, numOfOutput:%d\n", p->sum, p->num, *numOfOutput); -} - - - -void demo_finalize(char* dataOutput, char* interBuf, int* numOfOutput, SUdfInit* buf) { - SDemo *p = (SDemo *)interBuf; - printf("demo_finalize interbuf:%p, numOfOutput:%p, buf:%p, sum:%f, num:%d\n", interBuf, numOfOutput, 
buf, p->sum, p->num); - if (p->otype == 6) { - if (p->num != 30000) { - *(unsigned int *)dataOutput = FLOAT_NULL; - } else { - *(float *)dataOutput = (float)(p->sum / p->num); - } - printf("finalize values:%f\n", *(float *)dataOutput); - } else if (p->otype == 7) { - if (p->num != 30000) { - *(unsigned long long *)dataOutput = DOUBLE_NULL; - } else { - *(double *)dataOutput = (double)(p->sum / p->num); - } - printf("finalize values:%f\n", *(double *)dataOutput); - } - - *numOfOutput=1; - - printf("demo finalize, numOfOutput:%d\n", *numOfOutput); -} - - -int demo_init(SUdfInit* buf) { - printf("demo init\n"); - return 0; -} - - -void demo_destroy(SUdfInit* buf) { - printf("demo destroy\n"); -} - diff --git a/tests/script/sh/demo.lua b/tests/script/sh/demo.lua deleted file mode 100644 index c5e5582fc30b58db30a5b18faa4ccfd0a5f656d0..0000000000000000000000000000000000000000 --- a/tests/script/sh/demo.lua +++ /dev/null @@ -1,43 +0,0 @@ -funcName = "test" - -global = {} - -function test_init() - return global -end - -function test_add(rows, ans, key) - t = {} - t["sum"] = 0.0 - t["num"] = 0 - for i=1, #rows do - t["sum"] = t["sum"] + rows[i] * rows[i] - end - t["num"] = #rows - - - if (ans[key] ~= nil) - then - ans[key]["sum"] = ans[key]["sum"] + t["sum"] - ans[key]["num"] = ans[key]["num"] + t["num"] - else - ans[key] = t - end - - return ans; -end - -function test_finalize(ans, key) - local ret = 0.0 - - if (ans[key] ~= nil and ans[key]["num"] == 30000) - then - ret = ans[key]["sum"]/ans[key]["num"] - ans[key]["sum"] = 0.0 - ans[key]["num"] = 0 - else - ret = inf - end - - return ret, ans -end diff --git a/tests/script/sh/deploy.bat b/tests/script/sh/deploy.bat index 921f1611fb5ea80bbeb746b693d7c529c421ef27..38e7022edec2c5dae9053e11934a65e108a75b93 100644 --- a/tests/script/sh/deploy.bat +++ b/tests/script/sh/deploy.bat @@ -13,12 +13,12 @@ rem echo NODE: %NODE% set SCRIPT_DIR=%~dp0..\ rem echo SCRIPT_DIR: %SCRIPT_DIR% -set BUILD_DIR=%SCRIPT_DIR%..\..\..\debug\build\bin\ +echo %cd% | grep community > nul && set "BUILD_DIR=%SCRIPT_DIR%..\..\..\debug\build\bin\" || set "BUILD_DIR=%SCRIPT_DIR%..\..\debug\build\bin\" set TSIM=%BUILD_DIR%tsim rem echo BUILD_DIR: %BUILD_DIR% rem echo TSIM: %TSIM% -set SIM_DIR=%SCRIPT_DIR%..\..\..\sim\ +echo %cd% | grep community > nul && set "SIM_DIR=%SCRIPT_DIR%..\..\..\sim\" || set "SIM_DIR=%SCRIPT_DIR%..\..\sim\" rem echo SIM_DIR: %SIM_DIR% set NODE_DIR=%SIM_DIR%%NODE_NAME%\ @@ -59,48 +59,30 @@ for /f "skip=1" %%A in ( 'wmic computersystem get caption' ) do if not defined fqdn set "fqdn=%%A" -echo firstEp %fqdn%:7100 > %TAOS_CFG% +echo firstEp %fqdn%:7100 >> %TAOS_CFG% +echo secondEp %fqdn%:7200 >> %TAOS_CFG% echo fqdn %fqdn% >> %TAOS_CFG% echo serverPort %NODE% >> %TAOS_CFG% +echo supportVnodes 128 >> %TAOS_CFG% echo dataDir %DATA_DIR% >> %TAOS_CFG% echo logDir %LOG_DIR% >> %TAOS_CFG% echo debugFlag 0 >> %TAOS_CFG% -echo mDebugFlag 135 >> %TAOS_CFG% -echo sdbDebugFlag 135 >> %TAOS_CFG% -echo dDebugFlag 135 >> %TAOS_CFG% -echo vDebugFlag 135 >> %TAOS_CFG% -echo tsdbDebugFlag 135 >> %TAOS_CFG% -echo cDebugFlag 135 >> %TAOS_CFG% -echo jnidebugFlag 135 >> %TAOS_CFG% -echo odbcdebugFlag 135 >> %TAOS_CFG% -echo httpDebugFlag 135 >> %TAOS_CFG% -echo monDebugFlag 135 >> %TAOS_CFG% -echo mqttDebugFlag 135 >> %TAOS_CFG% -echo qdebugFlag 135 >> %TAOS_CFG% -echo rpcDebugFlag 135 >> %TAOS_CFG% +echo mDebugFlag 143 >> %TAOS_CFG% +echo dDebugFlag 143 >> %TAOS_CFG% +echo vDebugFlag 143 >> %TAOS_CFG% +echo tqDebugFlag 143 >> %TAOS_CFG% +echo tsdbDebugFlag 143 >> %TAOS_CFG% +echo 
cDebugFlag 143 >> %TAOS_CFG% +echo jniDebugFlag 143 >> %TAOS_CFG% +echo qDebugFlag 143 >> %TAOS_CFG% +echo rpcDebugFlag 143 >> %TAOS_CFG% echo tmrDebugFlag 131 >> %TAOS_CFG% -echo udebugFlag 135 >> %TAOS_CFG% -echo sdebugFlag 135 >> %TAOS_CFG% -echo wdebugFlag 135 >> %TAOS_CFG% -echo cqdebugFlag 135 >> %TAOS_CFG% -echo monitor 0 >> %TAOS_CFG% -echo monitorInterval 1 >> %TAOS_CFG% -echo http 0 >> %TAOS_CFG% -echo slaveQuery 0 >> %TAOS_CFG% -echo numOfThreadsPerCore 2.0 >> %TAOS_CFG% -echo defaultPass taosdata >> %TAOS_CFG% +echo uDebugFlag 143 >> %TAOS_CFG% +echo sDebugFlag 143 >> %TAOS_CFG% +echo wDebugFlag 143 >> %TAOS_CFG% echo numOfLogLines 20000000 >> %TAOS_CFG% -echo mnodeEqualVnodeNum 0 >> %TAOS_CFG% -echo balanceInterval 1 >> %TAOS_CFG% -echo clog 2 >> %TAOS_CFG% -echo days 10 >> %TAOS_CFG% echo statusInterval 1 >> %TAOS_CFG% -echo maxVgroupsPerDb 4 >> %TAOS_CFG% -echo minTablesPerVnode 4 >> %TAOS_CFG% -echo maxTablesPerVnode 1000 >> %TAOS_CFG% -echo tableIncStepPerVnode 10000 >> %TAOS_CFG% echo asyncLog 0 >> %TAOS_CFG% -echo numOfMnodes 1 >> %TAOS_CFG% echo locale en_US.UTF-8 >> %TAOS_CFG% -echo fsync 0 >> %TAOS_CFG% echo telemetryReporting 0 >> %TAOS_CFG% +echo querySmaOptimize 1 >> %TAOS_CFG% diff --git a/tests/script/sh/deploy.sh b/tests/script/sh/deploy.sh index cde27d7dc3fa00258d9d9d50ee3dfc82f858b138..7da8da09bfcf3810b692ff25f37333c93ae87497 100755 --- a/tests/script/sh/deploy.sh +++ b/tests/script/sh/deploy.sh @@ -1,19 +1,15 @@ #!/bin/bash -echo "Executing deploy.sh" +set +e +#set -x -if [ $# != 4 ]; then - echo "argument list need input : " - echo " -n nodeName" - echo " -i nodePort" - exit 1 -fi +echo "Executing deploy.sh" UNAME_BIN=`which uname` OS_TYPE=`$UNAME_BIN` NODE_NAME= NODE= -while getopts "n:i:" arg +while getopts "n:i:m" arg do case $arg in n) @@ -43,11 +39,7 @@ fi TAOS_DIR=`pwd` TAOSD_DIR=`find . -name "taosd"|grep bin|head -n1` -if [[ "$OS_TYPE" != "Darwin" ]]; then - cut_opt="--field=" -else - cut_opt="-f " -fi +cut_opt="-f " if [[ "$TAOSD_DIR" == *"$IN_TDINTERNAL"* ]]; then BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' ${cut_opt}2,3` @@ -55,7 +47,7 @@ else BIN_DIR=`find . 
-name "taosd"|grep bin|head -n1|cut -d '/' ${cut_opt}2` fi -BUILD_DIR=$TAOS_DIR/$BIN_DIR/build +BUILD_DIR=$TAOS_DIR/$BIN_DIR SIM_DIR=$TAOS_DIR/sim @@ -110,54 +102,44 @@ elif [ $NODE -eq 7 ]; then NODE=7700 elif [ $NODE -eq 8 ]; then NODE=7800 +elif [ $NODE -eq 9 ]; then + NODE=7900 fi +HOSTNAME=localhost + echo " " >> $TAOS_CFG echo "firstEp ${HOSTNAME}:7100" >> $TAOS_CFG echo "secondEp ${HOSTNAME}:7200" >> $TAOS_CFG +echo "fqdn ${HOSTNAME}" >> $TAOS_CFG echo "serverPort ${NODE}" >> $TAOS_CFG +echo "supportVnodes 1024" >> $TAOS_CFG +echo "statusInterval 1" >> $TAOS_CFG echo "dataDir $DATA_DIR" >> $TAOS_CFG echo "logDir $LOG_DIR" >> $TAOS_CFG echo "debugFlag 0" >> $TAOS_CFG -echo "mDebugFlag 143" >> $TAOS_CFG -echo "sdbDebugFlag 143" >> $TAOS_CFG +echo "tmrDebugFlag 131" >> $TAOS_CFG +echo "uDebugFlag 143" >> $TAOS_CFG +echo "rpcDebugFlag 143" >> $TAOS_CFG +echo "jniDebugFlag 143" >> $TAOS_CFG +echo "qDebugFlag 143" >> $TAOS_CFG +echo "cDebugFlag 143" >> $TAOS_CFG echo "dDebugFlag 143" >> $TAOS_CFG echo "vDebugFlag 143" >> $TAOS_CFG +echo "mDebugFlag 143" >> $TAOS_CFG +echo "wDebugFlag 143" >> $TAOS_CFG +echo "sDebugFlag 143" >> $TAOS_CFG echo "tsdbDebugFlag 143" >> $TAOS_CFG -echo "cDebugFlag 143" >> $TAOS_CFG -echo "jnidebugFlag 143" >> $TAOS_CFG -echo "odbcdebugFlag 143" >> $TAOS_CFG -echo "httpDebugFlag 143" >> $TAOS_CFG -echo "monDebugFlag 143" >> $TAOS_CFG -echo "mqttDebugFlag 143" >> $TAOS_CFG -echo "qdebugFlag 143" >> $TAOS_CFG -echo "rpcDebugFlag 143" >> $TAOS_CFG -echo "tmrDebugFlag 131" >> $TAOS_CFG -echo "udebugFlag 143" >> $TAOS_CFG -echo "sdebugFlag 143" >> $TAOS_CFG -echo "wdebugFlag 143" >> $TAOS_CFG -echo "cqdebugFlag 143" >> $TAOS_CFG -echo "monitor 0" >> $TAOS_CFG -echo "monitorInterval 1" >> $TAOS_CFG -echo "http 0" >> $TAOS_CFG -echo "slaveQuery 0" >> $TAOS_CFG -echo "numOfThreadsPerCore 2.0" >> $TAOS_CFG -echo "defaultPass taosdata" >> $TAOS_CFG +echo "tdbDebugFlag 143" >> $TAOS_CFG +echo "tqDebugFlag 143" >> $TAOS_CFG +echo "fsDebugFlag 143" >> $TAOS_CFG +echo "idxDebugFlag 143" >> $TAOS_CFG +echo "udfDebugFlag 143" >> $TAOS_CFG +echo "smaDebugFlag 143" >> $TAOS_CFG +echo "metaDebugFlag 143" >> $TAOS_CFG echo "numOfLogLines 20000000" >> $TAOS_CFG -echo "mnodeEqualVnodeNum 0" >> $TAOS_CFG -echo "balanceInterval 1" >> $TAOS_CFG -echo "clog 2" >> $TAOS_CFG -#echo "cache 1" >> $TAOS_CFG -echo "days 10" >> $TAOS_CFG -echo "statusInterval 1" >> $TAOS_CFG -echo "maxVgroupsPerDb 4" >> $TAOS_CFG -echo "minTablesPerVnode 4" >> $TAOS_CFG -echo "maxTablesPerVnode 1000" >> $TAOS_CFG -echo "tableIncStepPerVnode 10000" >> $TAOS_CFG echo "asyncLog 0" >> $TAOS_CFG -echo "numOfMnodes 1" >> $TAOS_CFG echo "locale en_US.UTF-8" >> $TAOS_CFG -echo "fsync 0" >> $TAOS_CFG echo "telemetryReporting 0" >> $TAOS_CFG +echo "querySmaOptimize 1" >> $TAOS_CFG echo " " >> $TAOS_CFG - diff --git a/tests/script/sh/exec-default.sh b/tests/script/sh/exec-default.sh index f648315c6745f63cfba9eb06ecfd53a9ccef1fed..0a83fa1dc81e5b510e85c2a5e2bf7f646f69e684 100755 --- a/tests/script/sh/exec-default.sh +++ b/tests/script/sh/exec-default.sh @@ -54,11 +54,7 @@ fi TAOS_DIR=`pwd` TAOSD_DIR=`find . -name "taosd"|grep bin|head -n1` -if [[ "$OS_TYPE" != "Darwin" ]]; then - cut_opt="--field=" -else - cut_opt="-f " -fi +cut_opt="-f " if [[ "$TAOSD_DIR" == *"$IN_TDINTERNAL"* ]]; then BIN_DIR=`find . 
-name "taosd"|grep bin|head -n1|cut -d '/' ${cut_opt}2,3` diff --git a/tests/script/sh/exec-no-random-fail.sh b/tests/script/sh/exec-no-random-fail.sh index e01b18a8e6bb0d4631a28c64c6accd57bceb9076..b15783afb569a760405091dea54dcbb6832fa920 100755 --- a/tests/script/sh/exec-no-random-fail.sh +++ b/tests/script/sh/exec-no-random-fail.sh @@ -54,11 +54,7 @@ fi TAOS_DIR=`pwd` TAOSD_DIR=`find . -name "taosd"|grep bin|head -n1` -if [[ "$OS_TYPE" != "Darwin" ]]; then - cut_opt="--field=" -else - cut_opt="-f " -fi +cut_opt="-f " if [[ "$TAOSD_DIR" == *"$IN_TDINTERNAL"* ]]; then BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' ${cut_opt}2,3` diff --git a/tests/script/sh/exec-random-fail.sh b/tests/script/sh/exec-random-fail.sh index 1f31899e3ac26861fc1860d10cb0a4899ff7bf36..15f2864ca6d9b371412e173ea81034a52a437994 100755 --- a/tests/script/sh/exec-random-fail.sh +++ b/tests/script/sh/exec-random-fail.sh @@ -54,11 +54,7 @@ fi TAOS_DIR=`pwd` TAOSD_DIR=`find . -name "taosd"|grep bin|head -n1` -if [[ "$OS_TYPE" != "Darwin" ]]; then - cut_opt="--field=" -else - cut_opt="-f " -fi +cut_opt="-f " if [[ "$TAOSD_DIR" == *"$IN_TDINTERNAL"* ]]; then BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' ${cut_opt}2,3` diff --git a/tests/script/sh/exec.bat b/tests/script/sh/exec.bat index 6651c7aa8f726d5d42d52a4c8f3a9395e7a12151..88dd43349ed9d49774faafe827b77126286c741d 100644 --- a/tests/script/sh/exec.bat +++ b/tests/script/sh/exec.bat @@ -6,6 +6,7 @@ if %1 == -n set NODE_NAME=%2 if %1 == -s set EXEC_OPTON=%2 if %3 == -n set NODE_NAME=%4 if %3 == -s set EXEC_OPTON=%4 +if "%5" == "-x" set EXEC_SIGNAL=%6 rem echo NODE_NAME: %NODE_NAME% rem echo NODE: %EXEC_OPTON% @@ -13,12 +14,12 @@ rem echo NODE: %EXEC_OPTON% set SCRIPT_DIR=%~dp0..\ rem echo SCRIPT_DIR: %SCRIPT_DIR% -set BUILD_DIR=%SCRIPT_DIR%..\..\..\debug\build\bin\ +echo %cd% | grep community > nul && set "BUILD_DIR=%SCRIPT_DIR%..\..\..\debug\build\bin\" || set "BUILD_DIR=%SCRIPT_DIR%..\..\debug\build\bin\" set TAOSD=%BUILD_DIR%taosd rem echo BUILD_DIR: %BUILD_DIR% rem echo TAOSD: %TAOSD% -set SIM_DIR=%SCRIPT_DIR%..\..\..\sim\ +echo %cd% | grep community > nul && set "SIM_DIR=%SCRIPT_DIR%..\..\..\sim\" || set "SIM_DIR=%SCRIPT_DIR%..\..\sim\" rem echo SIM_DIR: %SIM_DIR% set NODE_DIR=%SIM_DIR%%NODE_NAME%\ @@ -30,19 +31,56 @@ rem echo CFG_DIR: %CFG_DIR% set TAOS_CFG=%CFG_DIR%taos.cfg rem echo TAOS_CFG: %TAOS_CFG% +set LOG_DIR=%NODE_DIR%log\ +rem echo LOG_DIR: %LOG_DIR% + +set TAOS_LOG=%LOG_DIR%taosdlog.0 +rem echo TAOS_LOG: %TAOS_LOG% + if %EXEC_OPTON% == start ( + rm -rf %TAOS_LOG% echo start %TAOSD% -c %CFG_DIR% - start %TAOSD% -c %CFG_DIR% + mintty -h never %TAOSD% -c %CFG_DIR% + set /a check_num=0 +:check_online + sleep 1 + set /a check_num=check_num+1 + if "%check_num%" == "11" ( + echo check online out time. 
+ goto :finish + ) + echo check taosd online + tail -n +0 %TAOS_LOG% | grep -q "TDengine initialized successfully" || goto :check_online + echo finish + goto :finish ) if %EXEC_OPTON% == stop ( rem echo wmic process where "name='taosd.exe' and CommandLine like '%%%NODE_NAME%%%'" list INSTANCE rem wmic process where "name='taosd.exe' and CommandLine like '%%%NODE_NAME%%%'" call terminate > NUL 2>&1 - for /f "tokens=1 skip=1" %%A in ( - 'wmic process where "name='taosd.exe' and CommandLine like '%%%NODE_NAME%%%'" get processId ' - ) do ( - rem echo taskkill /IM %%A - taskkill /IM %%A > NUL 2>&1 + for /f "tokens=2" %%A in ('wmic process where "name='taosd.exe' and CommandLine like '%%%NODE_NAME%%%'" get processId ^| xargs echo') do ( + for /f "tokens=1" %%B in ('ps ^| grep %%A') do ( + if "%EXEC_SIGNAL%" == "SIGKILL" ( + kill -9 %%B + ) else ( + kill -INT %%B + call :check_offline + ) + ) + goto :finish ) ) +:finish +goto :eof + +:check_offline +sleep 1 +for /f "tokens=2" %%C in ('wmic process where "name='taosd.exe' and CommandLine like '%%%NODE_NAME%%%'" get processId ^| xargs echo') do ( + for /f "tokens=1" %%D in ('ps ^| grep %%C') do ( + echo kill -INT %%D + echo check taosd offline %NODE_NAME% %%C %%D + goto :check_offline + ) +) +goto :eof \ No newline at end of file diff --git a/tests/script/sh/exec.sh b/tests/script/sh/exec.sh index 80b8cda428da72daf55fc6e0d4c47867ce191d35..f548a4cc418d34ac5d4fe27fb944181c54b3a5c2 100755 --- a/tests/script/sh/exec.sh +++ b/tests/script/sh/exec.sh @@ -8,13 +8,25 @@ # exit 1 # fi +set +e +#set -x +if [[ "$OSTYPE" == "darwin"* ]]; then + TD_OS="Darwin" +else + OS=$(cat /etc/*-release | grep "^NAME=" | cut -d= -f2) + len=$(echo ${#OS}) + len=$((len-2)) + TD_OS=$(echo -ne ${OS:1:${len}} | cut -d" " -f1) +fi + +unset LD_PRELOAD UNAME_BIN=`which uname` OS_TYPE=`$UNAME_BIN` NODE_NAME= EXEC_OPTON= CLEAR_OPTION="false" -while getopts "n:s:u:x:ct" arg +while getopts "n:s:u:x:cv" arg do case $arg in n) @@ -26,8 +38,8 @@ do c) CLEAR_OPTION="clear" ;; - t) - SHELL_OPTION="true" + v) + VALGRIND_OPTION="true" ;; u) USERS=$OPTARG @@ -40,7 +52,10 @@ do ;; esac done - +if [[ "$VALGRIND_OPTION" = "true" ]] && [[ "$TD_OS" == "Alpine" ]]; then + echo alpine skip valgrind + VALGRIND_OPTION="false" +fi SCRIPT_DIR=`dirname $0` cd $SCRIPT_DIR/../ SCRIPT_DIR=`pwd` @@ -55,11 +70,7 @@ fi TAOS_DIR=`pwd` TAOSD_DIR=`find . -name "taosd"|grep bin|head -n1` -if [[ "$OS_TYPE" != "Darwin" ]]; then - cut_opt="--field=" -else - cut_opt="-f " -fi +cut_opt="-f " if [[ "$TAOSD_DIR" == *"$IN_TDINTERNAL"* ]]; then BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' ${cut_opt}2,3` @@ -67,17 +78,17 @@ else BIN_DIR=`find . 
-name "taosd"|grep bin|head -n1|cut -d '/' ${cut_opt}2` fi -BUILD_DIR=$TAOS_DIR/$BIN_DIR/build +BUILD_DIR=$TAOS_DIR/$BIN_DIR SIM_DIR=$TAOS_DIR/sim NODE_DIR=$SIM_DIR/$NODE_NAME -EXE_DIR=$BUILD_DIR/bin +EXE_DIR=$BUILD_DIR/build/bin CFG_DIR=$NODE_DIR/cfg LOG_DIR=$NODE_DIR/log DATA_DIR=$NODE_DIR/data MGMT_DIR=$NODE_DIR/data/mgmt TSDB_DIR=$NODE_DIR/data/tsdb - +ASAN_DIR=$SIM_DIR/asan TAOS_CFG=$NODE_DIR/cfg/taos.cfg echo ------------ $EXEC_OPTON $NODE_NAME @@ -94,14 +105,15 @@ if [ "$CLEAR_OPTION" = "clear" ]; then fi if [ "$EXEC_OPTON" = "start" ]; then - echo "ExcuteCmd:" $EXE_DIR/taosd -c $CFG_DIR - - if [ "$SHELL_OPTION" = "true" ]; then + #echo "ExcuteCmd:" $EXE_DIR/taosd -c $CFG_DIR + if [ "$VALGRIND_OPTION" = "true" ]; then TT=`date +%s` - mkdir ${LOG_DIR}/${TT} - nohup valgrind --log-file=${LOG_DIR}/${TT}/valgrind.log --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes $EXE_DIR/taosd -c $CFG_DIR > /dev/null 2>&1 & + #mkdir ${LOG_DIR}/${TT} + echo "nohup valgrind --log-file=${LOG_DIR}/valgrind-taosd-${NODE_NAME}-${TT}.log --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v -v --workaround-gcc296-bugs=yes $EXE_DIR/taosd -c $CFG_DIR > /dev/null 2>&1 &" + nohup valgrind --log-file=${LOG_DIR}/valgrind-taosd-${NODE_NAME}-${TT}.log --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v -v --workaround-gcc296-bugs=yes $EXE_DIR/taosd -c $CFG_DIR > /dev/null 2>&1 & else - nohup $EXE_DIR/taosd -c $CFG_DIR > /dev/null 2>&1 & + echo "nohup $EXE_DIR/taosd -c $CFG_DIR > /dev/null 2>&1 &" + nohup $EXE_DIR/taosd -c $CFG_DIR > /dev/null 2> $ASAN_DIR/$NODE_NAME.asan & fi else diff --git a/tests/script/sh/exec_tarbitrator.sh b/tests/script/sh/exec_tarbitrator.sh index e985bd65856025b2db8dfef724fdc652b2a03392..eacf5fd2262e54e5ef8d2f475e9cc4faf0515430 100755 --- a/tests/script/sh/exec_tarbitrator.sh +++ b/tests/script/sh/exec_tarbitrator.sh @@ -51,11 +51,7 @@ fi TAOS_DIR=`pwd` TAOSD_DIR=`find . -name "taosd"|grep bin|head -n1` -if [[ "$OS_TYPE" != "Darwin" ]]; then - cut_opt="--field=" -else - cut_opt="-f " -fi +cut_opt="-f " if [[ "$TAOSD_DIR" == *"$IN_TDINTERNAL"* ]]; then BIN_DIR=`find . 
-name "taosd"|grep bin|head -n1|cut -d '/' ${cut_opt}2,3` diff --git a/tests/script/sh/gpd.c b/tests/script/sh/gpd.c new file mode 100644 index 0000000000000000000000000000000000000000..2259efa64a500cd7bf331642445c47a08883bec1 --- /dev/null +++ b/tests/script/sh/gpd.c @@ -0,0 +1,46 @@ +#include +#include +#include +#ifdef LINUX +#include +#endif +#ifdef WINDOWS +#include +#endif +#include "taosudf.h" + +TAOS* taos = NULL; + +DLL_EXPORT int32_t gpd_init() { + return 0; +} + +DLL_EXPORT int32_t gpd_destroy() { + return 0; +} + +DLL_EXPORT int32_t gpd(SUdfDataBlock* block, SUdfColumn *resultCol) { + SUdfColumnMeta *meta = &resultCol->colMeta; + meta->bytes = 4; + meta->type = TSDB_DATA_TYPE_INT; + meta->scale = 0; + meta->precision = 0; + + SUdfColumnData *resultData = &resultCol->colData; + resultData->numOfRows = block->numOfRows; + for (int32_t i = 0; i < resultData->numOfRows; ++i) { + int64_t* calc_ts = (int64_t*)udfColDataGetData(block->udfCols[0], i); + char* varTbname = udfColDataGetData(block->udfCols[1], i); + char* varDbname = udfColDataGetData(block->udfCols[2], i); + + char dbName[256] = {0}; + char tblName[256] = {0}; + memcpy(dbName, varDataVal(varDbname), varDataLen(varDbname)); + memcpy(tblName, varDataVal(varTbname), varDataLen(varTbname)); + printf("%s, %s\n", dbName, tblName); + int32_t result = 0; + udfColDataSet(resultCol, i, (char*)&result, false); + } + + return 0; +} diff --git a/tests/script/sh/l2norm.c b/tests/script/sh/l2norm.c new file mode 100644 index 0000000000000000000000000000000000000000..8ccdffb8d69d1ba27410ad61a74ee3d7ca156be2 --- /dev/null +++ b/tests/script/sh/l2norm.c @@ -0,0 +1,80 @@ +#include +#include +#include +#include + +#include "taosudf.h" + +DLL_EXPORT int32_t l2norm_init() { + return 0; +} + +DLL_EXPORT int32_t l2norm_destroy() { + return 0; +} + +DLL_EXPORT int32_t l2norm_start(SUdfInterBuf *buf) { + *(int64_t*)(buf->buf) = 0; + buf->bufLen = sizeof(double); + buf->numOfResult = 0; + return 0; +} + +DLL_EXPORT int32_t l2norm(SUdfDataBlock* block, SUdfInterBuf *interBuf, SUdfInterBuf *newInterBuf) { + double sumSquares = *(double*)interBuf->buf; + int8_t numNotNull = 0; + for (int32_t i = 0; i < block->numOfCols; ++i) { + SUdfColumn* col = block->udfCols[i]; + if (!(col->colMeta.type == TSDB_DATA_TYPE_INT || + col->colMeta.type == TSDB_DATA_TYPE_DOUBLE)) { + return TSDB_CODE_UDF_INVALID_INPUT; + } + } + for (int32_t i = 0; i < block->numOfCols; ++i) { + for (int32_t j = 0; j < block->numOfRows; ++j) { + SUdfColumn* col = block->udfCols[i]; + if (udfColDataIsNull(col, j)) { + continue; + } + switch (col->colMeta.type) { + case TSDB_DATA_TYPE_INT: { + char* cell = udfColDataGetData(col, j); + int32_t num = *(int32_t*)cell; + sumSquares += (double)num * num; + break; + } + case TSDB_DATA_TYPE_DOUBLE: { + char* cell = udfColDataGetData(col, j); + double num = *(double*)cell; + sumSquares += num * num; + break; + } + default: + break; + } + ++numNotNull; + } + } + + *(double*)(newInterBuf->buf) = sumSquares; + newInterBuf->bufLen = sizeof(double); + + if (interBuf->numOfResult == 0 && numNotNull == 0) { + newInterBuf->numOfResult = 0; + } else { + newInterBuf->numOfResult = 1; + } + return 0; +} + +DLL_EXPORT int32_t l2norm_finish(SUdfInterBuf* buf, SUdfInterBuf *resultData) { + if (buf->numOfResult == 0) { + resultData->numOfResult = 0; + return 0; + } + double sumSquares = *(double*)(buf->buf); + *(double*)(resultData->buf) = sqrt(sumSquares); + resultData->bufLen = sizeof(double); + resultData->numOfResult = 1; + return 0; +} diff --git 
a/tests/script/sh/move_dnode.sh b/tests/script/sh/move_dnode.sh deleted file mode 100755 index d3650c18ad0f49185ce1e1268273b8a44e3cdc14..0000000000000000000000000000000000000000 --- a/tests/script/sh/move_dnode.sh +++ /dev/null @@ -1,56 +0,0 @@ -#!/bin/bash - -echo "Executing move_dnode.sh" - -UNAME_BIN=`which uname` -OS_TYPE=`$UNAME_BIN` - -SCRIPT_DIR=`dirname $0` -cd $SCRIPT_DIR/../ -SCRIPT_DIR=`pwd` -echo "SCRIPT_DIR: $SCRIPT_DIR" - -IN_TDINTERNAL="community" -if [[ "$SCRIPT_DIR" == *"$IN_TDINTERNAL"* ]]; then - cd ../../.. -else - cd ../../ -fi - -TAOS_DIR=`pwd` -TAOSD_DIR=`find . -name "taosd"|grep bin|head -n1` - -if [[ "$OS_TYPE" != "Darwin" ]]; then - cut_opt="--field=" -else - cut_opt="-f " -fi - -if [[ "$TAOSD_DIR" == *"$IN_TDINTERNAL"* ]]; then - BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' ${cut_opt}2,3` -else - BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' ${cut_opt}2` -fi - -BUILD_DIR=$TAOS_DIR/$BIN_DIR/build - -SIM_DIR=$TAOS_DIR/sim - -NODE_DIR=$SIM_DIR/$NODE_NAME - -if [ -d "$SIM_DIR/$2" ];then - rm -rf $SIM_DIR/$2 -fi -mv $SIM_DIR/$1 $SIM_DIR/$2 - -if [[ $2 =~ "dnode2" ]];then - sed -i 's/serverPort 7100/serverPort 7200/g' $SIM_DIR/$2/cfg/taos.cfg - sed -i 's/dnode1/dnode2/g' $SIM_DIR/$2/cfg/taos.cfg - sed -i 's/7100/7200/g' $SIM_DIR/$2/data/dnode/dnodeEps.json -elif [[ $2 =~ "dnode4" ]];then - sed -i 's/serverPort 7100/serverPort 7400/g' $SIM_DIR/$2/cfg/taos.cfg - sed -i 's/dnode1/dnode4/g' $SIM_DIR/$2/cfg/taos.cfg - sed -i 's/7100/7400/g' $SIM_DIR/dnode2/data/dnode/dnodeEps.json - sed -i 's/7100/7400/g' $SIM_DIR/dnode3/data/dnode/dnodeEps.json - sed -i 's/7100/7400/g' $SIM_DIR/$2/data/dnode/dnodeEps.json -fi diff --git a/tests/script/sh/mv_old_data.sh b/tests/script/sh/mv_old_data.sh index 3f4be6714fd757ee60d21ee97be5d411e3f95bdd..1a6bd22c621873253bf98690900759c1ef2304d7 100755 --- a/tests/script/sh/mv_old_data.sh +++ b/tests/script/sh/mv_old_data.sh @@ -20,11 +20,7 @@ fi TAOS_DIR=`pwd` TAOSD_DIR=`find . -name "taosd"|grep bin|head -n1` -if [[ "$OS_TYPE" != "Darwin" ]]; then - cut_opt="--field=" -else - cut_opt="-f " -fi +cut_opt="-f " if [[ "$TAOSD_DIR" == *"$IN_TDINTERNAL"* ]]; then BIN_DIR=`find . 
-name "taosd"|grep bin|head -n1|cut -d '/' ${cut_opt}2,3` diff --git a/tests/script/sh/prepare_udf.sh b/tests/script/sh/prepare_udf.sh old mode 100755 new mode 100644 index c197ccfa974c62375a265923840fab277819bd3a..a856b96c987ff63dd2c19a30ff007be9cd5b17f9 --- a/tests/script/sh/prepare_udf.sh +++ b/tests/script/sh/prepare_udf.sh @@ -9,7 +9,5 @@ touch /tmp/normal gcc -g -O0 -fPIC -shared sh/sum_double.c -o /tmp/sum_double.so gcc -g -O0 -fPIC -shared sh/add_one.c -o /tmp/add_one.so -gcc -g -O0 -fPIC -shared sh/add_one_64232.c -o /tmp/add_one_64232.so -gcc -g -O0 -fPIC -shared sh/sub_one.c -o /tmp/sub_one.so gcc -g -O0 -fPIC -shared sh/demo.c -o /tmp/demo.so gcc -g -O0 -fPIC -shared sh/abs_max.c -o /tmp/abs_max.so diff --git a/tests/script/sh/sigint_stop_dnodes.sh b/tests/script/sh/sigint_stop_dnodes.sh new file mode 100755 index 0000000000000000000000000000000000000000..83a4f1c1d5fe5ea0c1525c02c4093e3b11806869 --- /dev/null +++ b/tests/script/sh/sigint_stop_dnodes.sh @@ -0,0 +1,16 @@ +#!/bin/sh + +set +e +#set -x + +unset LD_PRELOAD +UNAME_BIN=`which uname` +OS_TYPE=`$UNAME_BIN` + +PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` +echo "Killing taosd processes " $PID +while [ -n "$PID" ]; do + #echo "Killing taosd processes " $PID + kill $PID + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` +done diff --git a/tests/script/sh/stop_dnodes.sh b/tests/script/sh/stop_dnodes.sh index 4c6d8e03510a39c2d5d1d020b5cfe7dabee39cb0..c63d6daf8a6490174f20188e9a5c6057b7007d4a 100755 --- a/tests/script/sh/stop_dnodes.sh +++ b/tests/script/sh/stop_dnodes.sh @@ -1,5 +1,9 @@ #!/bin/sh +set +e +#set -x + +unset LD_PRELOAD UNAME_BIN=`which uname` OS_TYPE=`$UNAME_BIN` @@ -12,8 +16,9 @@ fi PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` while [ -n "$PID" ]; do echo kill -9 $PID - pkill -9 taosd - echo "Killing processes locking on port 6030" + #pkill -9 taosd + kill -9 $PID + echo "Killing taosd processes" if [ "$OS_TYPE" != "Darwin" ]; then fuser -k -n tcp 6030 else @@ -22,15 +27,30 @@ while [ -n "$PID" ]; do PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` done -PID=`ps -ef|grep -w tarbitrator | grep -v grep | awk '{print $2}'` +PID=`ps -ef|grep -w taos | grep -v grep | awk '{print $2}'` while [ -n "$PID" ]; do echo kill -9 $PID - pkill -9 tarbitrator + #pkill -9 taos + kill -9 $PID + echo "Killing taos processes" if [ "$OS_TYPE" != "Darwin" ]; then - fuser -k -n tcp 6040 + fuser -k -n tcp 6030 else - lsof -nti:6040 | xargs kill -9 + lsof -nti:6030 | xargs kill -9 fi - PID=`ps -ef|grep -w tarbitrator | grep -v grep | awk '{print $2}'` + PID=`ps -ef|grep -w taos | grep -v grep | awk '{print $2}'` done +PID=`ps -ef|grep -w tmq_sim | grep -v grep | awk '{print $2}'` +while [ -n "$PID" ]; do + echo kill -9 $PID + #pkill -9 tmq_sim + kill -9 $PID + echo "Killing tmq_sim processes" + if [ "$OS_TYPE" != "Darwin" ]; then + fuser -k -n tcp 6030 + else + lsof -nti:6030 | xargs kill -9 + fi + PID=`ps -ef|grep -w tmq_sim | grep -v grep | awk '{print $2}'` +done diff --git a/tests/script/sh/sub_one.c b/tests/script/sh/sub_one.c deleted file mode 100644 index f901dd72b93fb93986be1c55811b6d728dd6731d..0000000000000000000000000000000000000000 --- a/tests/script/sh/sub_one.c +++ /dev/null @@ -1,33 +0,0 @@ -#include -#include -#include - -typedef struct SUdfInit{ - int maybe_null; /* 1 if function can return NULL */ - int decimals; /* for real functions */ - long long length; /* For string functions */ - char *ptr; /* free pointer for function data */ - int const_item; /* 0 if 
result is independent of arguments */ -} SUdfInit; - -void sub_one(char* data, short itype, short ibytes, int numOfRows, long long* ts, char* dataOutput, char* interBUf, char* tsOutput, - int* numOfOutput, short otype, short obytes, SUdfInit* buf) { - int i; - int r = 0; - printf("sub_one input data:%p, type:%d, rows:%d, ts:%p,%lld, dataoutput:%p, tsOutput:%p, numOfOutput:%p, buf:%p\n", data, itype, numOfRows, ts, *ts, dataOutput, tsOutput, numOfOutput, buf); - if (itype == 4) { - for(i=0;i -#include -#include -#include - -typedef struct SUdfInit{ - int maybe_null; /* 1 if function can return NULL */ - int decimals; /* for real functions */ - int64_t length; /* For string functions */ - char *ptr; /* free pointer for function data */ - int const_item; /* 0 if result is independent of arguments */ -} SUdfInit; - -#define TSDB_DATA_INT_NULL 0x80000000L - - -void sum_double(char* data, short itype, short ibytes, int numOfRows, int64_t* ts, char* dataOutput, char* interBuf, char* tsOutput, - int* numOfOutput, short otype, short obytes, SUdfInit* buf) { - int i; - int64_t r = 0; - printf("sum_double input data:%p, type:%d, rows:%d, ts:%p,%"PRId64", dataoutput:%p, tsOutput:%p, numOfOutput:%p, buf:%p\n", data, itype, numOfRows, ts, *ts, dataOutput, tsOutput, numOfOutput, buf); - if (itype == 4) { - r=*(int64_t *)dataOutput; - *numOfOutput=0; - - for(i=0;iptr)=*(int64_t*)dataOutput*2; - *(int64_t*)dataOutput=*(int64_t*)(buf->ptr); - // printf("sum_double finalize, dataoutput:%"PRId64", numOfOutput:%d\n", *(int64_t *)dataOutput, *numOfOutput); -} - -void sum_double_merge(char* data, int32_t numOfRows, char* dataOutput, int* numOfOutput, SUdfInit* buf) { - int r = 0; - int64_t sum = 0; - - // printf("sum_double_merge numOfRows:%d, dataoutput:%p, buf:%p\n", numOfRows, dataOutput, buf); - for (int i = 0; i < numOfRows; ++i) { - // printf("sum_double_merge %d - %"PRId64"\n", i, *((int64_t*)data + i)); - sum +=*((int64_t*)data + i); - } - - *(int64_t*)dataOutput+=sum; - if (numOfRows > 0) { - *numOfOutput=1; - } else { - *numOfOutput=0; - } - - // printf("sum_double_merge, dataoutput:%"PRId64", numOfOutput:%d\n", *(int64_t *)dataOutput, *numOfOutput); -} - - -int sum_double_init(SUdfInit* buf) { - buf->maybe_null=1; - buf->ptr = malloc(sizeof(int64_t)); - // printf("sum_double init\n"); - return 0; -} - - -void sum_double_destroy(SUdfInit* buf) { - free(buf->ptr); - // printf("sum_double destroy\n"); -} \ No newline at end of file
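As a usage sketch to accompany the UDF scripts above (not part of this change set): once compile_udf.sh has produced /tmp/udf/libl2norm.so, the l2norm aggregate can be registered and exercised from the taos shell roughly as follows. The database and column names here are hypothetical.

```sql
-- register the aggregate built from sh/l2norm.c; BUFSIZE 8 matches the double-sized inter buffer
CREATE AGGREGATE FUNCTION l2norm AS '/tmp/udf/libl2norm.so' OUTPUTTYPE DOUBLE BUFSIZE 8;
-- hypothetical table: any INT or DOUBLE column works, NULL cells are skipped by the UDF
SELECT l2norm(voltage) FROM test.meters;
```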