Commit a020c1b7 authored by G Ganlin Zhao

Merge branch '3.0' into fix/TD-19093

......@@ -79,7 +79,7 @@ def pre_test(){
rm -rf debug
mkdir debug
cd debug
cmake .. > /dev/null
cmake .. -DBUILD_TEST=true > /dev/null
make -j4> /dev/null
'''
......
......@@ -173,7 +173,7 @@ def pre_test_build_mac() {
'''
sh '''
cd ${WK}/debug
cmake ..
cmake .. -DBUILD_TEST=true
make -j8
'''
sh '''
......@@ -218,12 +218,12 @@ def pre_test_win(){
if (env.CHANGE_URL =~ /\/TDengine\//) {
bat '''
cd %WIN_INTERNAL_ROOT%
git pull
git pull origin ''' + env.CHANGE_TARGET + '''
'''
bat '''
cd %WIN_COMMUNITY_ROOT%
git remote prune origin
git pull
git pull origin ''' + env.CHANGE_TARGET + '''
'''
bat '''
cd %WIN_COMMUNITY_ROOT%
......@@ -236,7 +236,7 @@ def pre_test_win(){
} else if (env.CHANGE_URL =~ /\/TDinternal\//) {
bat '''
cd %WIN_INTERNAL_ROOT%
git pull
git pull origin ''' + env.CHANGE_TARGET + '''
'''
bat '''
cd %WIN_INTERNAL_ROOT%
......@@ -302,7 +302,7 @@ def pre_test_build_win() {
set CL=/MP8
echo ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> cmake"
time /t
cmake .. -G "NMake Makefiles JOM" || exit 7
cmake .. -G "NMake Makefiles JOM" -DBUILD_TEST=true || exit 7
echo ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> jom -j 6"
time /t
jom -j 6 || exit 8
......
......@@ -15,19 +15,19 @@
[![Coverage Status](https://coveralls.io/repos/github/taosdata/TDengine/badge.svg?branch=develop)](https://coveralls.io/github/taosdata/TDengine?branch=develop)
[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/4201/badge)](https://bestpractices.coreinfrastructure.org/projects/4201)
English | [简体中文](README-CN.md) | [Lean more about TSDB](https://tdengine.com/tsdb)
English | [简体中文](README-CN.md) | [Learn more about TSDB](https://tdengine.com/tsdb/)
# What is TDengine?
TDengine is an open source, high-performance, cloud native [time-series database](https://tdengine.com/tsdb/) optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. TDengine differentiates itself from other time-series databases with the following advantages:
- **[High-Performance](https://tdengine.com/tdengine/high-performance-time-series-database/)**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while out performing other time-series databases for data ingestion, querying and data compression.
- **[High Performance](https://tdengine.com/tdengine/high-performance-time-series-database/)**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while outperforming other time-series databases for data ingestion, querying and data compression.
- **[Simplified Solution](https://tdengine.com/tdengine/simplified-time-series-data-solution/)**: Through built-in caching, stream processing and data subscription features, TDengine provides a simplified solution for time-series data processing. It reduces system design complexity and operation costs significantly.
- **[Cloud Native](https://tdengine.com/tdengine/cloud-native-time-series-database/)**: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for kubernetes deployment and full observability, TDengine is a cloud native Time-Series Database and can be deployed on public, private or hybrid clouds.
- **[Ease of Use](https://docs.tdengine.com/get-started/docker/)**: For administrators, TDengine significantly reduces the effort to deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third party tools. For data users, it gives easy data access.
- **[Ease of Use](https://tdengine.com/tdengine/easy-time-series-data-platform/)**: For administrators, TDengine significantly reduces the effort to deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third party tools. For data users, it gives easy data access.
- **[Easy Data Analytics](https://tdengine.com/tdengine/time-series-data-analytics-made-easy/)**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way.
......@@ -232,9 +232,9 @@ After building successfully, TDengine can be installed by
sudo make install
```
Users can find more information about directories installed on the system in the [directory and files](https://docs.taosdata.com/reference/directory/) section.
Users can find more information about directories installed on the system in the [directory and files](https://docs.tdengine.com/reference/directory/) section.
Installing from source code will also configure service management for TDengine.Users can also choose to [install from packages](https://docs.taosdata.com/get-started/package/) for it.
Installing from source code will also configure service management for TDengine. Users can also choose to [install from packages](https://docs.tdengine.com/get-started/package/) instead.
To start the service after installation, in a terminal, use:
......@@ -321,7 +321,11 @@ TDengine provides abundant developing tools for users to develop on TDengine. Fo
Please follow the [contribution guidelines](CONTRIBUTING.md) to contribute to the project.
# Join TDengine User Community
# Join the TDengine Community
- Join [TDengine Discord Channel](https://discord.com/invite/VZdSuUg4pS?utm_id=discord)
- Join wechat group by adding WeChat “tdengine”
For more information about TDengine, you can follow us on social media and join our Discord server:
- [Discord](https://discord.com/invite/VZdSuUg4pS)
- [Twitter](https://twitter.com/TaosData)
- [LinkedIn](https://www.linkedin.com/company/tdengine/)
- [YouTube](https://www.youtube.com/channel/UCmp-1U6GS_3V3hjir6Uq5DQ)
......@@ -2,6 +2,12 @@
# Deps options
# =========================================================
option(
BUILD_TEST
"If build unit tests using googletest"
OFF
)
IF(${TD_WINDOWS})
MESSAGE("build pthread Win32")
......@@ -45,12 +51,6 @@ IF(${TD_WINDOWS})
"If build wingetopt on Windows"
ON
)
option(
BUILD_TEST
"If build unit tests using googletest"
ON
)
option(
TDENGINE_3
......@@ -65,28 +65,8 @@ IF(${TD_WINDOWS})
)
ELSEIF (TD_DARWIN_64)
add_definitions(-DCOMPILER_SUPPORTS_CXX13)
option(
BUILD_TEST
"If build unit tests using googletest"
ON
)
ELSE ()
include(CheckCXXCompilerFlag)
CHECK_CXX_COMPILER_FLAG("-std=c++13" COMPILER_SUPPORTS_CXX13)
IF(${COMPILER_SUPPORTS_CXX13})
IF(${BUILD_TEST})
add_definitions(-DCOMPILER_SUPPORTS_CXX13)
option(
BUILD_TEST
"If build unit tests using googletest"
ON
)
ELSE ()
option(
BUILD_TEST
"If build unit tests using googletest"
OFF
)
ENDIF ()
ENDIF ()
......
......@@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "3.0.1.1")
SET(TD_VER_NUMBER "3.0.1.2")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
......
......@@ -2,7 +2,7 @@
# taosadapter
ExternalProject_Add(taosadapter
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
GIT_TAG 05fb2ff
GIT_TAG be729ab
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
......
......@@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
GIT_TAG 285b5e0
GIT_TAG cf1df1c
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
......
......@@ -2,7 +2,7 @@
# taosws-rs
ExternalProject_Add(taosws-rs
GIT_REPOSITORY https://github.com/taosdata/taos-connector-rust.git
GIT_TAG e771403
GIT_TAG 1bdfca3
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosws-rs"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
......
```csharp title="Native Connection"
{{#include docs/examples/csharp/ConnectExample.cs}}
{{#include docs/examples/csharp/connect/Program.cs}}
```
:::info
C# connector supports only native connection for now.
:::
```csharp title="WebSocket Connection"
{{#include docs/examples/csharp/wsConnect/Program.cs}}
```
```csharp
{{#include docs/examples/csharp/InfluxDBLineExample.cs}}
{{#include docs/examples/csharp/influxdbLine/Program.cs}}
```
```csharp
{{#include docs/examples/csharp/OptsJsonExample.cs}}
{{#include docs/examples/csharp/optsJSON/Program.cs}}
```
```csharp
{{#include docs/examples/csharp/OptsTelnetExample.cs}}
{{#include docs/examples/csharp/optsTelnet/Program.cs}}
```
```csharp
{{#include docs/examples/csharp/SQLInsertExample.cs}}
{{#include docs/examples/csharp/sqlInsert/Program.cs}}
```
```csharp
{{#include docs/examples/csharp/StmtInsertExample.cs}}
{{#include docs/examples/csharp/stmtInsert/Program.cs}}
```
```csharp
{{#include docs/examples/csharp/QueryExample.cs}}
{{#include docs/examples/csharp/query/Program.cs}}
```
```csharp
{{#include docs/examples/csharp/AsyncQueryExample.cs}}
{{#include docs/examples/csharp/asyncQuery/Program.cs}}
```
```csharp
{{#include docs/examples/csharp/SubscribeDemo.cs}}
{{#include docs/examples/csharp/subscribe/Program.cs}}
```
\ No newline at end of file
This diff has been collapsed.
......@@ -68,7 +68,7 @@ A query can be performed on some or all columns. Data and tag columns can all be
### Wildcards
You can use an asterisk (\*) as a wildcard character to indicate all columns. For standard tables, the asterisk indicates only data columns. For supertables and subtables, tag columns are also included.
You can use an asterisk (\*) as a wildcard character to indicate all columns. For normal tables or sub-tables, the asterisk indicates only data columns. For supertables, tag columns are also included when using the asterisk (\*).
```sql
SELECT * FROM d1001;
......
......@@ -116,7 +116,7 @@ The parameters are described as follows:
- **protocol**: Specify which connection method to use. For example, `taos+ws://localhost:6041` uses Websocket to establish connections.
- **username/password**: Username and password used to create connections.
- **host/port**: Specifies the server and port to establish a connection. If you do not specify a hostname or port, native connections default to `localhost:6030` and Websocket connections default to `localhost:6041`.
- **database**: Specify the default database to connect to.
- **database**: Specify the default database to connect to. It's optional.
- **params**: Optional parameters.
A sample DSN description string is as follows:
......
......@@ -17,7 +17,7 @@ import CSAsyncQuery from "../../07-develop/04-query-data/_cs_async.mdx"
`TDengine.Connector` is a C# language connector provided by TDengine that allows C# developers to develop C# applications that access TDengine cluster data.
The `TDengine.Connector` connector supports connect to TDengine instances via the TDengine client driver (taosc), providing data writing, querying, subscription, schemaless writing, bind interface, etc. The `TDengine.Connector` currently does not provide a REST connection interface. Developers can write their RESTful application by referring to the [REST API](/reference/rest-api/) documentation.
The `TDengine.Connector` connector supports connecting to TDengine instances via the TDengine client driver (taosc), providing data writing, querying, subscription, schemaless writing, the bind interface, etc. `TDengine.Connector` also supports WebSocket connections: developers can establish a connection through a DSN, with support for data writing, querying, parameter binding, etc.
This article describes how to install `TDengine.Connector` in a Linux or Windows environment and connect to TDengine clusters via `TDengine.Connector` to perform basic operations such as data writing and querying.
......@@ -35,6 +35,10 @@ Please refer to [version support list](/reference/connector#version-support)
## Supported features
<Tabs defaultValue="native">
<TabItem value="native" label="Native Connection">
1. Connection Management
2. General Query
3. Continuous Query
......@@ -42,6 +46,18 @@ Please refer to [version support list](/reference/connector#version-support)
5. Subscription
6. Schemaless
</TabItem>
<TabItem value="rest" label="WebSocket Connection">
1. Connection Management
2. General Query
3. Continuous Query
4. Parameter Binding
</TabItem>
</Tabs>
## Installation Steps
### Pre-installation preparation
......@@ -74,12 +90,18 @@ cp -r src/ myProject
cd myProject
dotnet add example.csproj reference src/TDengine.csproj
```
</TabItem>
</Tabs>
## Establish a Connection
``` C#
<Tabs defaultValue="native">
<TabItem value="native" label="Native Connection">
``` csharp
using TDengineDriver;
namespace TDengineExample
......@@ -112,14 +134,62 @@ namespace TDengineExample
```
</TabItem>
<TabItem value="rest" label="WebSocket Connection">
The structure of the DSN description string is as follows:
```text
[<protocol>]://[[<username>:<password>@]<host>:<port>][/<database>][?<p1>=<v1>[&<p2>=<v2>]]
|------------|---|-----------|-----------|------|------|------------|-----------------------|
| protocol | | username | password | host | port | database | params |
```
The parameters are described as follows:
* **protocol**: Specify which connection method to use (http and ws are supported). For example, `ws://localhost:6041` uses Websocket to establish connections.
* **username/password**: Username and password used to create connections.
* **host/port**: Specifies the server and port to establish a connection. Websocket connections default to `localhost:6041`.
* **database**: Specify the default database to connect to. It's optional.
* **params**: Optional parameters.
A sample DSN description string is as follows:
```text
ws://localhost:6041/test
```
``` csharp
{{#include docs/examples/csharp/wsConnect/Program.cs}}
```
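
For readers of this diff who don't have the included file at hand, the same connection flow is sketched inline below. This is a minimal sketch, assuming the `TDengineWS.Impl` namespace from `TDengine.Connector` 3.0.1 and a local taosAdapter listening on port 6041, as in the wsConnect example added later in this changeset.

```csharp
using System;
using TDengineWS.Impl;

namespace Examples
{
    public class WSConnSketch
    {
        static void Main(string[] args)
        {
            // DSN for a local taosAdapter; adjust credentials, host and database as needed.
            string DSN = "ws://root:taosdata@127.0.0.1:6041/test";
            IntPtr wsConn = LibTaosWS.WSConnectWithDSN(DSN);
            if (wsConn == IntPtr.Zero)
            {
                throw new Exception($"get WS connection failed, reason:{LibTaosWS.WSErrorStr(IntPtr.Zero)} code:{LibTaosWS.WSErrorNo(IntPtr.Zero)}");
            }
            Console.WriteLine("Establish connect success.");
            // Close the connection when it is no longer needed.
            LibTaosWS.WSClose(wsConn);
        }
    }
}
```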
</TabItem>
</Tabs>
## Usage examples
### Write data
#### SQL Write
<Tabs defaultValue="native">
<TabItem value="native" label="Native Connection">
<CSInsert />
</TabItem>
<TabItem value="rest" label="WebSocket Connection">
```csharp
{{#include docs/examples/csharp/wsInsert/Program.cs}}
```
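
For orientation, a condensed sketch of the same flow follows: connect with a DSN, run one `INSERT` over WebSocket, check the result, and clean up. It is only a sketch and assumes the `test` database and the `test.meters` supertable already exist (the full wsInsert program later in this changeset creates them).

```csharp
using System;
using TDengineWS.Impl;

namespace Examples
{
    public class WSInsertSketch
    {
        static void Main(string[] args)
        {
            string DSN = "ws://root:taosdata@127.0.0.1:6041/test";
            IntPtr wsConn = LibTaosWS.WSConnectWithDSN(DSN);
            if (wsConn == IntPtr.Zero)
            {
                throw new Exception($"get WS connection failed, code:{LibTaosWS.WSErrorNo(IntPtr.Zero)}");
            }

            // Assumes test.meters was already created as in the full wsInsert example.
            string insert = "INSERT INTO test.d1001 USING test.meters TAGS('California.SanFrancisco', 2) VALUES (now, 10.3, 219, 0.31)";
            IntPtr wsRes = LibTaosWS.WSQuery(wsConn, insert);
            if (LibTaosWS.WSErrorNo(wsRes) != 0)
            {
                throw new Exception($"insert failed, reason:{LibTaosWS.WSErrorStr(wsRes)}");
            }
            Console.WriteLine($"insert success, affected {LibTaosWS.WSAffectRows(wsRes)} rows");
            LibTaosWS.WSFreeResult(wsRes);
            LibTaosWS.WSClose(wsConn);
        }
    }
}
```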
</TabItem>
</Tabs>
#### InfluxDB line protocol write
<CSInfluxLine />
......@@ -132,12 +202,48 @@ namespace TDengineExample
<CSOpenTSDBJson />
#### Parameter Binding
<Tabs defaultValue="native">
<TabItem value="native" label="Native Connection">
``` csharp
{{#include docs/examples/csharp/stmtInsert/Program.cs}}
```
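
The essential call sequence is condensed below from the stmtInsert program that appears later in this changeset. This is a sketch under the assumption that the `power` database and `meters` supertable already exist; the full program creates them and handles errors through its own helpers.

```csharp
using System;
using TDengineDriver;

namespace TDengineExample
{
    internal class StmtInsertSketch
    {
        static void Main()
        {
            IntPtr conn = TDengine.Connect("localhost", "root", "taosdata", "power", 6030);
            if (conn == IntPtr.Zero) throw new Exception("Connect to TDengine failed");
            try
            {
                // 1. init and prepare: placeholders for subtable name, tags and column values.
                IntPtr stmt = TDengine.StmtInit(conn);
                if (stmt == IntPtr.Zero) throw new Exception("failed to init stmt");
                CheckRes(TDengine.StmtPrepare(stmt, "INSERT INTO ? USING meters TAGS(?, ?) VALUES(?, ?, ?, ?)"), "prepare");

                // 2. bind the target subtable name and its tag values.
                TAOS_MULTI_BIND[] tags = { TaosMultiBind.MultiBindBinary(new string[] { "California.SanFrancisco" }),
                                           TaosMultiBind.MultiBindInt(new int?[] { 2 }) };
                CheckRes(TDengine.StmtSetTbnameTags(stmt, "d1001", tags), "set tbname/tags");

                // 3. bind one batch of column values, then add and execute the batch.
                TAOS_MULTI_BIND[] values = { TaosMultiBind.MultiBindTimestamp(new long[] { 1648432611249 }),
                                             TaosMultiBind.MultiBindFloat(new float?[] { 10.3f }),
                                             TaosMultiBind.MultiBindInt(new int?[] { 219 }),
                                             TaosMultiBind.MultiBindFloat(new float?[] { 0.31f }) };
                CheckRes(TDengine.StmtBindParamBatch(stmt, values), "bind params");
                CheckRes(TDengine.StmtAddBatch(stmt), "add batch");
                CheckRes(TDengine.StmtExecute(stmt), "execute");

                // 4. free the bind buffers and close the statement.
                TaosMultiBind.FreeTaosBind(tags);
                TaosMultiBind.FreeTaosBind(values);
                TDengine.StmtClose(stmt);
            }
            finally
            {
                TDengine.Close(conn);
            }
        }

        static void CheckRes(int code, string step)
        {
            if (code != 0) throw new Exception($"stmt {step} failed, code {code}");
        }
    }
}
```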
</TabItem>
<TabItem value="rest" label="WebSocket Connection">
```csharp
{{#include docs/examples/csharp/wsStmt/Program.cs}}
```
</TabItem>
</Tabs>
### Query data
#### Synchronous Query
<Tabs defaultValue="native">
<TabItem value="native" label="Native Connection">
<CSQuery />
</TabItem>
<TabItem value="rest" label="WebSocket Connection">
```csharp
{{#include docs/examples/csharp/wsQuery/Program.cs}}
```
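
A pared-down sketch of the query call follows; it only executes the statement and checks for errors, while row fetching and printing are shown in the full wsQuery program later in this changeset. The third argument of `WSQueryTimeout` is a timeout value (1 in that program).

```csharp
using System;
using TDengineWS.Impl;

namespace Examples
{
    public class WSQuerySketch
    {
        static void Main(string[] args)
        {
            string DSN = "ws://root:taosdata@127.0.0.1:6041/test";
            IntPtr wsConn = LibTaosWS.WSConnectWithDSN(DSN);
            if (wsConn == IntPtr.Zero)
            {
                throw new Exception($"get WS connection failed, code:{LibTaosWS.WSErrorNo(IntPtr.Zero)}");
            }

            // Execute the query with a timeout; WSQuery (without a timeout) also works, per the full example.
            IntPtr wsRes = LibTaosWS.WSQueryTimeout(wsConn, "select * from test.meters", 1);
            if (LibTaosWS.WSErrorNo(wsRes) != 0)
            {
                throw new Exception($"query failed, reason:{LibTaosWS.WSErrorStr(wsRes)}");
            }
            Console.WriteLine("query executed; see the full wsQuery example for reading the result set");

            LibTaosWS.WSFreeResult(wsRes);
            LibTaosWS.WSClose(wsConn);
        }
    }
}
```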
</TabItem>
</Tabs>
#### Asynchronous query
<CSAsyncQuery />
......@@ -145,18 +251,21 @@ namespace TDengineExample
### More sample programs
|Sample program |Sample program description |
|--------------------------------------------------------------------------------------------------------------------|------------ --------------------------------|
|--------------------------------------------------------------------------------------------------------------------|--------------------------------------------|
| [CURD](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/Query/Query.cs) | Table creation, data insertion, and query examples with TDengine.Connector |
| [JSON Tag](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/JSONTag) | Writing and querying JSON tag data with TDengine Connector |
| [stmt](https://github.com/taosdata/taos-connector-dotnet/tree/3.0/examples/Stmt) | Parameter binding with TDengine Connector |
| [schemaless](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/schemaless) | Schemaless writes with TDengine Connector |
| [async query](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/AsyncQuery/QueryAsync.cs) | Asynchronous queries with TDengine Connector |
| [TMQ](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/TMQ/TMQ.cs) | Data subscription with TDengine Connector |
| [Subscription](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/TMQ/TMQ.cs) | Subscription example with TDengine Connector |
| [Basic WebSocket Usage](https://github.com/taosdata/taos-connector-dotnet/blob/5a4a7cd0dbcda114447cdc6d0c6dedd8e84a52da/examples/WS/WebSocketSample.cs) | WebSocket basic data in and out with TDengine connector |
| [WebSocket Parameter Binding](https://github.com/taosdata/taos-connector-dotnet/blob/5a4a7cd0dbcda114447cdc6d0c6dedd8e84a52da/examples/WS/WebSocketSTMT.cs) | WebSocket parameter binding example |
## Important update records
| TDengine.Connector | Description |
|--------------------|--------------------------------|
| 3.0.1 | Supports WebSocket and Cloud, with support for query, insert, and parameter binding |
| 3.0.0 | Supports TDengine 3.0.0.0. TDengine 2.x is not supported. Added `TDengine.Impl.GetData()` interface to deserialize query results. |
| 1.0.7 | Fixed TDengine.Query() memory leak. |
| 1.0.6 | Fix schemaless bug in 1.0.4 and 1.0.5. |
......
......@@ -164,7 +164,7 @@ The parameters described in this document by the effect that they have on the sy
| Attribute | Description |
| -------- | -------------------- |
| Applicable | Client only |
| 含义 | SMA index optimization policy |
| Meaning | SMA index optimization policy |
| Unit | None |
| Default Value | 0 |
| Notes |
......@@ -325,7 +325,7 @@ The charset that takes effect is UTF-8.
| Applicable | Server Only |
| Meaning | Maximum number of vnodes per dnode |
| Value Range | 0-4096 |
| Default Value | 256 |
| Default Value | 2x the CPU cores |
## Time Parameters
......@@ -697,152 +697,154 @@ To prevent system resource from being exhausted by multiple concurrent streams,
| 15 | telemetryPort | No | Yes |
| 16 | queryPolicy | No | Yes |
| 17 | querySmaOptimize | No | Yes |
| 18 | queryBufferSize | Yes | Yes |
| 19 | maxNumOfDistinctRes | Yes | Yes |
| 20 | minSlidingTime | Yes | Yes |
| 21 | minIntervalTime | Yes | Yes |
| 22 | countAlwaysReturnValue | Yes | Yes |
| 23 | dataDir | Yes | Yes |
| 24 | minimalDataDirGB | Yes | Yes |
| 25 | supportVnodes | No | Yes |
| 26 | tempDir | Yes | Yes |
| 27 | minimalTmpDirGB | Yes | Yes |
| 28 | compressMsgSize | Yes | Yes |
| 29 | compressColData | Yes | Yes |
| 30 | smlChildTableName | Yes | Yes |
| 31 | smlTagName | Yes | Yes |
| 32 | smlDataFormat | No | Yes |
| 33 | statusInterval | Yes | Yes |
| 34 | shellActivityTimer | Yes | Yes |
| 35 | transPullupInterval | No | Yes |
| 36 | mqRebalanceInterval | No | Yes |
| 37 | ttlUnit | No | Yes |
| 38 | ttlPushInterval | No | Yes |
| 39 | numOfTaskQueueThreads | No | Yes |
| 40 | numOfRpcThreads | No | Yes |
| 41 | numOfCommitThreads | Yes | Yes |
| 42 | numOfMnodeReadThreads | No | Yes |
| 43 | numOfVnodeQueryThreads | No | Yes |
| 44 | numOfVnodeStreamThreads | No | Yes |
| 45 | numOfVnodeFetchThreads | No | Yes |
| 46 | numOfVnodeWriteThreads | No | Yes |
| 47 | numOfVnodeSyncThreads | No | Yes |
| 48 | numOfQnodeQueryThreads | No | Yes |
| 49 | numOfQnodeFetchThreads | No | Yes |
| 50 | numOfSnodeSharedThreads | No | Yes |
| 51 | numOfSnodeUniqueThreads | No | Yes |
| 52 | rpcQueueMemoryAllowed | No | Yes |
| 53 | logDir | Yes | Yes |
| 54 | minimalLogDirGB | Yes | Yes |
| 55 | numOfLogLines | Yes | Yes |
| 56 | asyncLog | Yes | Yes |
| 57 | logKeepDays | Yes | Yes |
| 58 | debugFlag | Yes | Yes |
| 59 | tmrDebugFlag | Yes | Yes |
| 60 | uDebugFlag | Yes | Yes |
| 61 | rpcDebugFlag | Yes | Yes |
| 62 | jniDebugFlag | Yes | Yes |
| 63 | qDebugFlag | Yes | Yes |
| 64 | cDebugFlag | Yes | Yes |
| 65 | dDebugFlag | Yes | Yes |
| 66 | vDebugFlag | Yes | Yes |
| 67 | mDebugFlag | Yes | Yes |
| 68 | wDebugFlag | Yes | Yes |
| 69 | sDebugFlag | Yes | Yes |
| 70 | tsdbDebugFlag | Yes | Yes |
| 71 | tqDebugFlag | No | Yes |
| 72 | fsDebugFlag | Yes | Yes |
| 73 | udfDebugFlag | No | Yes |
| 74 | smaDebugFlag | No | Yes |
| 75 | idxDebugFlag | No | Yes |
| 76 | tdbDebugFlag | No | Yes |
| 77 | metaDebugFlag | No | Yes |
| 78 | timezone | Yes | Yes |
| 79 | locale | Yes | Yes |
| 80 | charset | Yes | Yes |
| 81 | udf | Yes | Yes |
| 82 | enableCoreFile | Yes | Yes |
| 83 | arbitrator | Yes | No |
| 84 | numOfThreadsPerCore | Yes | No |
| 85 | numOfMnodes | Yes | No |
| 86 | vnodeBak | Yes | No |
| 87 | balance | Yes | No |
| 88 | balanceInterval | Yes | No |
| 89 | offlineThreshold | Yes | No |
| 90 | role | Yes | No |
| 91 | dnodeNopLoop | Yes | No |
| 92 | keepTimeOffset | Yes | No |
| 93 | rpcTimer | Yes | No |
| 94 | rpcMaxTime | Yes | No |
| 95 | rpcForceTcp | Yes | No |
| 96 | tcpConnTimeout | Yes | No |
| 97 | syncCheckInterval | Yes | No |
| 98 | maxTmrCtrl | Yes | No |
| 99 | monitorReplica | Yes | No |
| 100 | smlTagNullName | Yes | No |
| 101 | keepColumnName | Yes | No |
| 102 | ratioOfQueryCores | Yes | No |
| 103 | maxStreamCompDelay | Yes | No |
| 104 | maxFirstStreamCompDelay | Yes | No |
| 105 | retryStreamCompDelay | Yes | No |
| 106 | streamCompDelayRatio | Yes | No |
| 107 | maxVgroupsPerDb | Yes | No |
| 108 | maxTablesPerVnode | Yes | No |
| 109 | minTablesPerVnode | Yes | No |
| 110 | tableIncStepPerVnode | Yes | No |
| 111 | cache | Yes | No |
| 112 | blocks | Yes | No |
| 113 | days | Yes | No |
| 114 | keep | Yes | No |
| 115 | minRows | Yes | No |
| 116 | maxRows | Yes | No |
| 117 | quorum | Yes | No |
| 118 | comp | Yes | No |
| 119 | walLevel | Yes | No |
| 120 | fsync | Yes | No |
| 121 | replica | Yes | No |
| 122 | partitions | Yes | No |
| 123 | quorum | Yes | No |
| 124 | update | Yes | No |
| 125 | cachelast | Yes | No |
| 126 | maxSQLLength | Yes | No |
| 127 | maxWildCardsLength | Yes | No |
| 128 | maxRegexStringLen | Yes | No |
| 129 | maxNumOfOrderedRes | Yes | No |
| 130 | maxConnections | Yes | No |
| 131 | mnodeEqualVnodeNum | Yes | No |
| 132 | http | Yes | No |
| 133 | httpEnableRecordSql | Yes | No |
| 134 | httpMaxThreads | Yes | No |
| 135 | restfulRowLimit | Yes | No |
| 136 | httpDbNameMandatory | Yes | No |
| 137 | httpKeepAlive | Yes | No |
| 138 | enableRecordSql | Yes | No |
| 139 | maxBinaryDisplayWidth | Yes | No |
| 140 | stream | Yes | No |
| 141 | retrieveBlockingModel | Yes | No |
| 142 | tsdbMetaCompactRatio | Yes | No |
| 143 | defaultJSONStrType | Yes | No |
| 144 | walFlushSize | Yes | No |
| 145 | keepTimeOffset | Yes | No |
| 146 | flowctrl | Yes | No |
| 147 | slaveQuery | Yes | No |
| 148 | adjustMaster | Yes | No |
| 149 | topicBinaryLen | Yes | No |
| 150 | telegrafUseFieldNum | Yes | No |
| 151 | deadLockKillQuery | Yes | No |
| 152 | clientMerge | Yes | No |
| 153 | sdbDebugFlag | Yes | No |
| 154 | odbcDebugFlag | Yes | No |
| 155 | httpDebugFlag | Yes | No |
| 156 | monDebugFlag | Yes | No |
| 157 | cqDebugFlag | Yes | No |
| 158 | shortcutFlag | Yes | No |
| 159 | probeSeconds | Yes | No |
| 160 | probeKillSeconds | Yes | No |
| 161 | probeInterval | Yes | No |
| 162 | lossyColumns | Yes | No |
| 163 | fPrecision | Yes | No |
| 164 | dPrecision | Yes | No |
| 165 | maxRange | Yes | No |
| 166 | range | Yes | No |
| 18 | queryRsmaTolerance | No | Yes |
| 19 | queryBufferSize | Yes | Yes |
| 20 | maxNumOfDistinctRes | Yes | Yes |
| 21 | minSlidingTime | Yes | Yes |
| 22 | minIntervalTime | Yes | Yes |
| 23 | countAlwaysReturnValue | Yes | Yes |
| 24 | dataDir | Yes | Yes |
| 25 | minimalDataDirGB | Yes | Yes |
| 26 | supportVnodes | No | Yes |
| 27 | tempDir | Yes | Yes |
| 28 | minimalTmpDirGB | Yes | Yes |
| 29 | compressMsgSize | Yes | Yes |
| 30 | compressColData | Yes | Yes |
| 31 | smlChildTableName | Yes | Yes |
| 32 | smlTagName | Yes | Yes |
| 33 | smlDataFormat | No | Yes |
| 34 | statusInterval | Yes | Yes |
| 35 | shellActivityTimer | Yes | Yes |
| 36 | transPullupInterval | No | Yes |
| 37 | mqRebalanceInterval | No | Yes |
| 38 | ttlUnit | No | Yes |
| 39 | ttlPushInterval | No | Yes |
| 40 | numOfTaskQueueThreads | No | Yes |
| 41 | numOfRpcThreads | No | Yes |
| 42 | numOfCommitThreads | Yes | Yes |
| 43 | numOfMnodeReadThreads | No | Yes |
| 44 | numOfVnodeQueryThreads | No | Yes |
| 45 | numOfVnodeStreamThreads | No | Yes |
| 46 | numOfVnodeFetchThreads | No | Yes |
| 47 | numOfVnodeWriteThreads | No | Yes |
| 48 | numOfVnodeSyncThreads | No | Yes |
| 49 | numOfVnodeRsmaThreads | No | Yes |
| 50 | numOfQnodeQueryThreads | No | Yes |
| 51 | numOfQnodeFetchThreads | No | Yes |
| 52 | numOfSnodeSharedThreads | No | Yes |
| 53 | numOfSnodeUniqueThreads | No | Yes |
| 54 | rpcQueueMemoryAllowed | No | Yes |
| 55 | logDir | Yes | Yes |
| 56 | minimalLogDirGB | Yes | Yes |
| 57 | numOfLogLines | Yes | Yes |
| 58 | asyncLog | Yes | Yes |
| 59 | logKeepDays | Yes | Yes |
| 60 | debugFlag | Yes | Yes |
| 61 | tmrDebugFlag | Yes | Yes |
| 62 | uDebugFlag | Yes | Yes |
| 63 | rpcDebugFlag | Yes | Yes |
| 64 | jniDebugFlag | Yes | Yes |
| 65 | qDebugFlag | Yes | Yes |
| 66 | cDebugFlag | Yes | Yes |
| 67 | dDebugFlag | Yes | Yes |
| 68 | vDebugFlag | Yes | Yes |
| 69 | mDebugFlag | Yes | Yes |
| 70 | wDebugFlag | Yes | Yes |
| 71 | sDebugFlag | Yes | Yes |
| 72 | tsdbDebugFlag | Yes | Yes |
| 73 | tqDebugFlag | No | Yes |
| 74 | fsDebugFlag | Yes | Yes |
| 75 | udfDebugFlag | No | Yes |
| 76 | smaDebugFlag | No | Yes |
| 77 | idxDebugFlag | No | Yes |
| 78 | tdbDebugFlag | No | Yes |
| 79 | metaDebugFlag | No | Yes |
| 80 | timezone | Yes | Yes |
| 81 | locale | Yes | Yes |
| 82 | charset | Yes | Yes |
| 83 | udf | Yes | Yes |
| 84 | enableCoreFile | Yes | Yes |
| 85 | arbitrator | Yes | No |
| 86 | numOfThreadsPerCore | Yes | No |
| 87 | numOfMnodes | Yes | No |
| 88 | vnodeBak | Yes | No |
| 89 | balance | Yes | No |
| 90 | balanceInterval | Yes | No |
| 91 | offlineThreshold | Yes | No |
| 92 | role | Yes | No |
| 93 | dnodeNopLoop | Yes | No |
| 94 | keepTimeOffset | Yes | No |
| 95 | rpcTimer | Yes | No |
| 96 | rpcMaxTime | Yes | No |
| 97 | rpcForceTcp | Yes | No |
| 98 | tcpConnTimeout | Yes | No |
| 99 | syncCheckInterval | Yes | No |
| 100 | maxTmrCtrl | Yes | No |
| 101 | monitorReplica | Yes | No |
| 102 | smlTagNullName | Yes | No |
| 103 | keepColumnName | Yes | No |
| 104 | ratioOfQueryCores | Yes | No |
| 105 | maxStreamCompDelay | Yes | No |
| 106 | maxFirstStreamCompDelay | Yes | No |
| 107 | retryStreamCompDelay | Yes | No |
| 108 | streamCompDelayRatio | Yes | No |
| 109 | maxVgroupsPerDb | Yes | No |
| 110 | maxTablesPerVnode | Yes | No |
| 111 | minTablesPerVnode | Yes | No |
| 112 | tableIncStepPerVnode | Yes | No |
| 113 | cache | Yes | No |
| 114 | blocks | Yes | No |
| 115 | days | Yes | No |
| 116 | keep | Yes | No |
| 117 | minRows | Yes | No |
| 118 | maxRows | Yes | No |
| 119 | quorum | Yes | No |
| 120 | comp | Yes | No |
| 121 | walLevel | Yes | No |
| 122 | fsync | Yes | No |
| 123 | replica | Yes | No |
| 124 | partitions | Yes | No |
| 125 | quorum | Yes | No |
| 126 | update | Yes | No |
| 127 | cachelast | Yes | No |
| 128 | maxSQLLength | Yes | No |
| 129 | maxWildCardsLength | Yes | No |
| 130 | maxRegexStringLen | Yes | No |
| 131 | maxNumOfOrderedRes | Yes | No |
| 132 | maxConnections | Yes | No |
| 133 | mnodeEqualVnodeNum | Yes | No |
| 134 | http | Yes | No |
| 135 | httpEnableRecordSql | Yes | No |
| 136 | httpMaxThreads | Yes | No |
| 137 | restfulRowLimit | Yes | No |
| 138 | httpDbNameMandatory | Yes | No |
| 139 | httpKeepAlive | Yes | No |
| 140 | enableRecordSql | Yes | No |
| 141 | maxBinaryDisplayWidth | Yes | No |
| 142 | stream | Yes | No |
| 143 | retrieveBlockingModel | Yes | No |
| 144 | tsdbMetaCompactRatio | Yes | No |
| 145 | defaultJSONStrType | Yes | No |
| 146 | walFlushSize | Yes | No |
| 147 | keepTimeOffset | Yes | No |
| 148 | flowctrl | Yes | No |
| 149 | slaveQuery | Yes | No |
| 150 | adjustMaster | Yes | No |
| 151 | topicBinaryLen | Yes | No |
| 152 | telegrafUseFieldNum | Yes | No |
| 153 | deadLockKillQuery | Yes | No |
| 154 | clientMerge | Yes | No |
| 155 | sdbDebugFlag | Yes | No |
| 156 | odbcDebugFlag | Yes | No |
| 157 | httpDebugFlag | Yes | No |
| 158 | monDebugFlag | Yes | No |
| 159 | cqDebugFlag | Yes | No |
| 160 | shortcutFlag | Yes | No |
| 161 | probeSeconds | Yes | No |
| 162 | probeKillSeconds | Yes | No |
| 163 | probeInterval | Yes | No |
| 164 | lossyColumns | Yes | No |
| 165 | fPrecision | Yes | No |
| 166 | dPrecision | Yes | No |
| 167 | maxRange | Yes | No |
| 168 | range | Yes | No |
......@@ -6,6 +6,9 @@ description: TDengine release history, Release Notes and download links.
import Release from "/components/ReleaseV3";
## 3.0.1.2
<Release type="tdengine" version="3.0.1.2" />
## 3.0.1.1
......
......@@ -6,6 +6,10 @@ description: taosTools release history, Release Notes, download links.
import Release from "/components/ReleaseV3";
## 2.2.2
<Release type="tools" version="2.2.2" />
## 2.2.0
<Release type="tools" version="2.2.0" />
......
bin
obj
.vs
*.sln
\ No newline at end of file
asyncQuery/bin
connect/bin
influxdbLine/bin
optsJSON/bin
optsTelnet/bin
query/bin
sqlInsert/bin
stmtInsert/bin
subscribe/bin
wsConnect/bin
wsInsert/bin
wsQuery/bin
wsStmt/bin
asyncQuery/obj
connect/obj
influxdbLine/obj
optsJSON/obj
optsTelnet/obj
query/obj
sqlInsert/obj
stmtInsert/obj
subscribe/obj
wsConnect/obj
wsInsert/obj
wsQuery/obj
wsStmt/obj
\ No newline at end of file
......@@ -11,11 +11,17 @@ namespace TDengineExample
static void Main()
{
IntPtr conn = GetConnection();
QueryAsyncCallback queryAsyncCallback = new QueryAsyncCallback(QueryCallback);
TDengine.QueryAsync(conn, "select * from meters", queryAsyncCallback, IntPtr.Zero);
Thread.Sleep(2000);
TDengine.Close(conn);
TDengine.Cleanup();
try
{
QueryAsyncCallback queryAsyncCallback = new QueryAsyncCallback(QueryCallback);
TDengine.QueryAsync(conn, "select * from meters", queryAsyncCallback, IntPtr.Zero);
Thread.Sleep(2000);
}
finally
{
TDengine.Close(conn);
}
}
static void QueryCallback(IntPtr param, IntPtr taosRes, int code)
......@@ -27,11 +33,11 @@ namespace TDengineExample
}
else
{
Console.WriteLine($"async query data failed, failed code {code}");
throw new Exception($"async query data failed,code:{code},reason:{TDengine.Error(taosRes)}");
}
}
// Iteratively call this interface until "numOfRows" is no greater than 0.
// Iteratively call this interface until "numOfRows" is no greater than 0.
static void FetchRawBlockCallback(IntPtr param, IntPtr taosRes, int numOfRows)
{
if (numOfRows > 0)
......@@ -43,7 +49,7 @@ namespace TDengineExample
for (int i = 0; i < dataList.Count; i++)
{
if (i != 0 && (i+1) % metaList.Count == 0)
if (i != 0 && (i + 1) % metaList.Count == 0)
{
Console.WriteLine("{0}\t|", dataList[i]);
}
......@@ -63,7 +69,7 @@ namespace TDengineExample
}
else
{
Console.WriteLine($"FetchRawBlockCallback callback error, error code {numOfRows}");
throw new Exception($"FetchRawBlockCallback callback error, error code {numOfRows}");
}
TDengine.FreeResult(taosRes);
}
......@@ -79,8 +85,7 @@ namespace TDengineExample
var conn = TDengine.Connect(host, username, password, dbname, port);
if (conn == IntPtr.Zero)
{
Console.WriteLine("Connect to TDengine failed");
Environment.Exit(0);
throw new Exception("Connect to TDengine failed");
}
else
{
......
......@@ -9,7 +9,7 @@
</PropertyGroup>
<ItemGroup>
<PackageReference Include="TDengine.Connector" Version="3.0.0" />
<PackageReference Include="TDengine.Connector" Version="3.0.1" />
</ItemGroup>
</Project>
......@@ -16,7 +16,7 @@ namespace TDengineExample
var conn = TDengine.Connect(host, username, password, dbname, port);
if (conn == IntPtr.Zero)
{
Console.WriteLine("Connect to TDengine failed");
throw new Exception("Connect to TDengine failed");
}
else
{
......

Microsoft Visual Studio Solution File, Format Version 12.00
# Visual Studio Version 16
VisualStudioVersion = 16.0.30114.105
MinimumVisualStudioVersion = 10.0.40219.1
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "asyncquery", "asyncQuery\asyncquery.csproj", "{E2A5F00C-14E7-40E1-A2DE-6AB2975616D3}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "connect", "connect\connect.csproj", "{CCC5042D-93FC-4AE0-B2F6-7E692FD476B7}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "influxdbline", "influxdbLine\influxdbline.csproj", "{6A24FB80-1E3C-4E2D-A5AB-914FA583874D}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "optsJSON", "optsJSON\optsJSON.csproj", "{6725A961-0C66-4196-AC98-8D3F3D757D6C}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "optstelnet", "optsTelnet\optstelnet.csproj", "{B3B50D25-688B-44D4-8683-482ABC52FFCA}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "query", "query\query.csproj", "{F2B7D13B-FE04-4C5C-BB6D-C12E0A9D9970}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "stmtinsert", "stmtInsert\stmtinsert.csproj", "{B40D6BED-BE3C-4B44-9B12-28BE441311BA}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "subscribe", "subscribe\subscribe.csproj", "{C3D45A8E-AFC0-4547-9F3C-467B0B583DED}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "wsConnect", "wsConnect\wsConnect.csproj", "{51E19494-845E-49ED-97C7-749AE63111BD}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "wsInsert", "wsInsert\wsInsert.csproj", "{13E2233B-4AFF-40D9-AF42-AB3F01617540}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "wsQuery", "wsQuery\wsQuery.csproj", "{0F394169-C456-442C-929D-C2D43A0EEC7B}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "wsStmt", "wsStmt\wsStmt.csproj", "{27B9C9AB-9055-4BF2-8A14-4E59F09D5985}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "sqlinsert", "sqlInsert\sqlinsert.csproj", "{CD24BD12-8550-4627-A11D-707B446F48C3}"
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Any CPU = Debug|Any CPU
Release|Any CPU = Release|Any CPU
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
EndGlobalSection
GlobalSection(ProjectConfigurationPlatforms) = postSolution
{E2A5F00C-14E7-40E1-A2DE-6AB2975616D3}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{E2A5F00C-14E7-40E1-A2DE-6AB2975616D3}.Debug|Any CPU.Build.0 = Debug|Any CPU
{E2A5F00C-14E7-40E1-A2DE-6AB2975616D3}.Release|Any CPU.ActiveCfg = Release|Any CPU
{E2A5F00C-14E7-40E1-A2DE-6AB2975616D3}.Release|Any CPU.Build.0 = Release|Any CPU
{CCC5042D-93FC-4AE0-B2F6-7E692FD476B7}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{CCC5042D-93FC-4AE0-B2F6-7E692FD476B7}.Debug|Any CPU.Build.0 = Debug|Any CPU
{CCC5042D-93FC-4AE0-B2F6-7E692FD476B7}.Release|Any CPU.ActiveCfg = Release|Any CPU
{CCC5042D-93FC-4AE0-B2F6-7E692FD476B7}.Release|Any CPU.Build.0 = Release|Any CPU
{6A24FB80-1E3C-4E2D-A5AB-914FA583874D}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{6A24FB80-1E3C-4E2D-A5AB-914FA583874D}.Debug|Any CPU.Build.0 = Debug|Any CPU
{6A24FB80-1E3C-4E2D-A5AB-914FA583874D}.Release|Any CPU.ActiveCfg = Release|Any CPU
{6A24FB80-1E3C-4E2D-A5AB-914FA583874D}.Release|Any CPU.Build.0 = Release|Any CPU
{6725A961-0C66-4196-AC98-8D3F3D757D6C}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{6725A961-0C66-4196-AC98-8D3F3D757D6C}.Debug|Any CPU.Build.0 = Debug|Any CPU
{6725A961-0C66-4196-AC98-8D3F3D757D6C}.Release|Any CPU.ActiveCfg = Release|Any CPU
{6725A961-0C66-4196-AC98-8D3F3D757D6C}.Release|Any CPU.Build.0 = Release|Any CPU
{B3B50D25-688B-44D4-8683-482ABC52FFCA}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{B3B50D25-688B-44D4-8683-482ABC52FFCA}.Debug|Any CPU.Build.0 = Debug|Any CPU
{B3B50D25-688B-44D4-8683-482ABC52FFCA}.Release|Any CPU.ActiveCfg = Release|Any CPU
{B3B50D25-688B-44D4-8683-482ABC52FFCA}.Release|Any CPU.Build.0 = Release|Any CPU
{F2B7D13B-FE04-4C5C-BB6D-C12E0A9D9970}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{F2B7D13B-FE04-4C5C-BB6D-C12E0A9D9970}.Debug|Any CPU.Build.0 = Debug|Any CPU
{F2B7D13B-FE04-4C5C-BB6D-C12E0A9D9970}.Release|Any CPU.ActiveCfg = Release|Any CPU
{F2B7D13B-FE04-4C5C-BB6D-C12E0A9D9970}.Release|Any CPU.Build.0 = Release|Any CPU
{B40D6BED-BE3C-4B44-9B12-28BE441311BA}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{B40D6BED-BE3C-4B44-9B12-28BE441311BA}.Debug|Any CPU.Build.0 = Debug|Any CPU
{B40D6BED-BE3C-4B44-9B12-28BE441311BA}.Release|Any CPU.ActiveCfg = Release|Any CPU
{B40D6BED-BE3C-4B44-9B12-28BE441311BA}.Release|Any CPU.Build.0 = Release|Any CPU
{C3D45A8E-AFC0-4547-9F3C-467B0B583DED}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{C3D45A8E-AFC0-4547-9F3C-467B0B583DED}.Debug|Any CPU.Build.0 = Debug|Any CPU
{C3D45A8E-AFC0-4547-9F3C-467B0B583DED}.Release|Any CPU.ActiveCfg = Release|Any CPU
{C3D45A8E-AFC0-4547-9F3C-467B0B583DED}.Release|Any CPU.Build.0 = Release|Any CPU
{51E19494-845E-49ED-97C7-749AE63111BD}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{51E19494-845E-49ED-97C7-749AE63111BD}.Debug|Any CPU.Build.0 = Debug|Any CPU
{51E19494-845E-49ED-97C7-749AE63111BD}.Release|Any CPU.ActiveCfg = Release|Any CPU
{51E19494-845E-49ED-97C7-749AE63111BD}.Release|Any CPU.Build.0 = Release|Any CPU
{13E2233B-4AFF-40D9-AF42-AB3F01617540}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{13E2233B-4AFF-40D9-AF42-AB3F01617540}.Debug|Any CPU.Build.0 = Debug|Any CPU
{13E2233B-4AFF-40D9-AF42-AB3F01617540}.Release|Any CPU.ActiveCfg = Release|Any CPU
{13E2233B-4AFF-40D9-AF42-AB3F01617540}.Release|Any CPU.Build.0 = Release|Any CPU
{0F394169-C456-442C-929D-C2D43A0EEC7B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{0F394169-C456-442C-929D-C2D43A0EEC7B}.Debug|Any CPU.Build.0 = Debug|Any CPU
{0F394169-C456-442C-929D-C2D43A0EEC7B}.Release|Any CPU.ActiveCfg = Release|Any CPU
{0F394169-C456-442C-929D-C2D43A0EEC7B}.Release|Any CPU.Build.0 = Release|Any CPU
{27B9C9AB-9055-4BF2-8A14-4E59F09D5985}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{27B9C9AB-9055-4BF2-8A14-4E59F09D5985}.Debug|Any CPU.Build.0 = Debug|Any CPU
{27B9C9AB-9055-4BF2-8A14-4E59F09D5985}.Release|Any CPU.ActiveCfg = Release|Any CPU
{27B9C9AB-9055-4BF2-8A14-4E59F09D5985}.Release|Any CPU.Build.0 = Release|Any CPU
{CD24BD12-8550-4627-A11D-707B446F48C3}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{CD24BD12-8550-4627-A11D-707B446F48C3}.Debug|Any CPU.Build.0 = Debug|Any CPU
{CD24BD12-8550-4627-A11D-707B446F48C3}.Release|Any CPU.ActiveCfg = Release|Any CPU
{CD24BD12-8550-4627-A11D-707B446F48C3}.Release|Any CPU.Build.0 = Release|Any CPU
EndGlobalSection
EndGlobal
......@@ -17,8 +17,7 @@ namespace TDengineExample
IntPtr res = TDengine.SchemalessInsert(conn, lines, lines.Length, (int)TDengineSchemalessProtocol.TSDB_SML_LINE_PROTOCOL, (int)TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_MILLI_SECONDS);
if (TDengine.ErrorNo(res) != 0)
{
Console.WriteLine("SchemalessInsert failed since " + TDengine.Error(res));
ExitProgram(conn, 1);
throw new Exception("SchemalessInsert failed since " + TDengine.Error(res));
}
else
{
......@@ -26,7 +25,6 @@ namespace TDengineExample
Console.WriteLine($"SchemalessInsert success, affected {affectedRows} rows");
}
TDengine.FreeResult(res);
ExitProgram(conn, 0);
}
static IntPtr GetConnection()
......@@ -39,9 +37,7 @@ namespace TDengineExample
var conn = TDengine.Connect(host, username, password, dbname, port);
if (conn == IntPtr.Zero)
{
Console.WriteLine("Connect to TDengine failed");
TDengine.Cleanup();
Environment.Exit(1);
throw new Exception("Connect to TDengine failed");
}
else
{
......@@ -55,23 +51,15 @@ namespace TDengineExample
IntPtr res = TDengine.Query(conn, "CREATE DATABASE test");
if (TDengine.ErrorNo(res) != 0)
{
Console.WriteLine("failed to create database, reason: " + TDengine.Error(res));
ExitProgram(conn, 1);
throw new Exception("failed to create database, reason: " + TDengine.Error(res));
}
res = TDengine.Query(conn, "USE test");
if (TDengine.ErrorNo(res) != 0)
{
Console.WriteLine("failed to change database, reason: " + TDengine.Error(res));
ExitProgram(conn, 1);
throw new Exception("failed to change database, reason: " + TDengine.Error(res));
}
}
static void ExitProgram(IntPtr conn, int exitCode)
{
TDengine.Close(conn);
TDengine.Cleanup();
Environment.Exit(exitCode);
}
}
}
......@@ -7,27 +7,31 @@ namespace TDengineExample
static void Main()
{
IntPtr conn = GetConnection();
PrepareDatabase(conn);
string[] lines = { "[{\"metric\": \"meters.current\", \"timestamp\": 1648432611249, \"value\": 10.3, \"tags\": {\"location\": \"California.SanFrancisco\", \"groupid\": 2}}," +
try
{
PrepareDatabase(conn);
string[] lines = { "[{\"metric\": \"meters.current\", \"timestamp\": 1648432611249, \"value\": 10.3, \"tags\": {\"location\": \"California.SanFrancisco\", \"groupid\": 2}}," +
" {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611249, \"value\": 219, \"tags\": {\"location\": \"California.LosAngeles\", \"groupid\": 1}}, " +
"{\"metric\": \"meters.current\", \"timestamp\": 1648432611250, \"value\": 12.6, \"tags\": {\"location\": \"California.SanFrancisco\", \"groupid\": 2}}," +
" {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611250, \"value\": 221, \"tags\": {\"location\": \"California.LosAngeles\", \"groupid\": 1}}]"
};
IntPtr res = TDengine.SchemalessInsert(conn, lines, 1, (int)TDengineSchemalessProtocol.TSDB_SML_JSON_PROTOCOL, (int)TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_NOT_CONFIGURED);
if (TDengine.ErrorNo(res) != 0)
{
Console.WriteLine("SchemalessInsert failed since " + TDengine.Error(res));
ExitProgram(conn, 1);
IntPtr res = TDengine.SchemalessInsert(conn, lines, 1, (int)TDengineSchemalessProtocol.TSDB_SML_JSON_PROTOCOL, (int)TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_NOT_CONFIGURED);
if (TDengine.ErrorNo(res) != 0)
{
throw new Exception("SchemalessInsert failed since " + TDengine.Error(res));
}
else
{
int affectedRows = TDengine.AffectRows(res);
Console.WriteLine($"SchemalessInsert success, affected {affectedRows} rows");
}
TDengine.FreeResult(res);
}
else
finally
{
int affectedRows = TDengine.AffectRows(res);
Console.WriteLine($"SchemalessInsert success, affected {affectedRows} rows");
TDengine.Close(conn);
}
TDengine.FreeResult(res);
ExitProgram(conn, 0);
}
static IntPtr GetConnection()
{
......@@ -39,9 +43,7 @@ namespace TDengineExample
var conn = TDengine.Connect(host, username, password, dbname, port);
if (conn == IntPtr.Zero)
{
Console.WriteLine("Connect to TDengine failed");
TDengine.Cleanup();
Environment.Exit(1);
throw new Exception("Connect to TDengine failed");
}
else
{
......@@ -55,22 +57,13 @@ namespace TDengineExample
IntPtr res = TDengine.Query(conn, "CREATE DATABASE test");
if (TDengine.ErrorNo(res) != 0)
{
Console.WriteLine("failed to create database, reason: " + TDengine.Error(res));
ExitProgram(conn, 1);
throw new Exception("failed to create database, reason: " + TDengine.Error(res));
}
res = TDengine.Query(conn, "USE test");
if (TDengine.ErrorNo(res) != 0)
{
Console.WriteLine("failed to change database, reason: " + TDengine.Error(res));
ExitProgram(conn, 1);
throw new Exception("failed to change database, reason: " + TDengine.Error(res));
}
}
static void ExitProgram(IntPtr conn, int exitCode)
{
TDengine.Close(conn);
TDengine.Cleanup();
Environment.Exit(exitCode);
}
}
}
......@@ -7,8 +7,10 @@ namespace TDengineExample
static void Main()
{
IntPtr conn = GetConnection();
PrepareDatabase(conn);
string[] lines = {
try
{
PrepareDatabase(conn);
string[] lines = {
"meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2",
"meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2",
"meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3",
......@@ -18,20 +20,22 @@ namespace TDengineExample
"meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3",
"meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3",
};
IntPtr res = TDengine.SchemalessInsert(conn, lines, lines.Length, (int)TDengineSchemalessProtocol.TSDB_SML_TELNET_PROTOCOL, (int)TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_NOT_CONFIGURED);
if (TDengine.ErrorNo(res) != 0)
{
Console.WriteLine("SchemalessInsert failed since " + TDengine.Error(res));
ExitProgram(conn, 1);
IntPtr res = TDengine.SchemalessInsert(conn, lines, lines.Length, (int)TDengineSchemalessProtocol.TSDB_SML_TELNET_PROTOCOL, (int)TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_NOT_CONFIGURED);
if (TDengine.ErrorNo(res) != 0)
{
throw new Exception("SchemalessInsert failed since " + TDengine.Error(res));
}
else
{
int affectedRows = TDengine.AffectRows(res);
Console.WriteLine($"SchemalessInsert success, affected {affectedRows} rows");
}
TDengine.FreeResult(res);
}
else
finally
{
int affectedRows = TDengine.AffectRows(res);
Console.WriteLine($"SchemalessInsert success, affected {affectedRows} rows");
TDengine.Close(conn);
}
TDengine.FreeResult(res);
ExitProgram(conn, 0);
}
static IntPtr GetConnection()
{
......@@ -43,9 +47,7 @@ namespace TDengineExample
var conn = TDengine.Connect(host, username, password, dbname, port);
if (conn == IntPtr.Zero)
{
Console.WriteLine("Connect to TDengine failed");
TDengine.Cleanup();
Environment.Exit(1);
throw new Exception("Connect to TDengine failed");
}
else
{
......@@ -59,22 +61,13 @@ namespace TDengineExample
IntPtr res = TDengine.Query(conn, "CREATE DATABASE test");
if (TDengine.ErrorNo(res) != 0)
{
Console.WriteLine("failed to create database, reason: " + TDengine.Error(res));
ExitProgram(conn, 1);
throw new Exception("failed to create database, reason: " + TDengine.Error(res));
}
res = TDengine.Query(conn, "USE test");
if (TDengine.ErrorNo(res) != 0)
{
Console.WriteLine("failed to change database, reason: " + TDengine.Error(res));
ExitProgram(conn, 1);
throw new Exception("failed to change database, reason: " + TDengine.Error(res));
}
}
static void ExitProgram(IntPtr conn, int exitCode)
{
TDengine.Close(conn);
TDengine.Cleanup();
Environment.Exit(exitCode);
}
}
}
......@@ -9,48 +9,47 @@ namespace TDengineExample
static void Main()
{
IntPtr conn = GetConnection();
// run query
IntPtr res = TDengine.Query(conn, "SELECT * FROM meters LIMIT 2");
if (TDengine.ErrorNo(res) != 0)
try
{
Console.WriteLine("Failed to query since: " + TDengine.Error(res));
TDengine.Close(conn);
TDengine.Cleanup();
return;
}
// run query
IntPtr res = TDengine.Query(conn, "SELECT * FROM meters LIMIT 2");
if (TDengine.ErrorNo(res) != 0)
{
throw new Exception("Failed to query since: " + TDengine.Error(res));
}
// get filed count
int fieldCount = TDengine.FieldCount(res);
Console.WriteLine("fieldCount=" + fieldCount);
// get filed count
int fieldCount = TDengine.FieldCount(res);
Console.WriteLine("fieldCount=" + fieldCount);
// print column names
List<TDengineMeta> metas = LibTaos.GetMeta(res);
for (int i = 0; i < metas.Count; i++)
{
Console.Write(metas[i].name + "\t");
}
Console.WriteLine();
// print column names
List<TDengineMeta> metas = LibTaos.GetMeta(res);
for (int i = 0; i < metas.Count; i++)
{
Console.Write(metas[i].name + "\t");
}
Console.WriteLine();
// print values
List<Object> resData = LibTaos.GetData(res);
for (int i = 0; i < resData.Count; i++)
{
Console.Write($"|{resData[i].ToString()} \t");
if (((i + 1) % metas.Count == 0))
// print values
List<Object> resData = LibTaos.GetData(res);
for (int i = 0; i < resData.Count; i++)
{
Console.WriteLine("");
Console.Write($"|{resData[i].ToString()} \t");
if (((i + 1) % metas.Count == 0))
{
Console.WriteLine("");
}
}
Console.WriteLine();
// Free result after use
TDengine.FreeResult(res);
}
Console.WriteLine();
if (TDengine.ErrorNo(res) != 0)
finally
{
Console.WriteLine($"Query is not complete, Error {TDengine.ErrorNo(res)} {TDengine.Error(res)}");
TDengine.Close(conn);
}
// exit
TDengine.FreeResult(res);
TDengine.Close(conn);
TDengine.Cleanup();
}
static IntPtr GetConnection()
{
......@@ -62,8 +61,7 @@ namespace TDengineExample
var conn = TDengine.Connect(host, username, password, dbname, port);
if (conn == IntPtr.Zero)
{
Console.WriteLine("Connect to TDengine failed");
System.Environment.Exit(0);
throw new Exception("Connect to TDengine failed");
}
else
{
......
......@@ -9,7 +9,7 @@
</PropertyGroup>
<ItemGroup>
<PackageReference Include="TDengine.Connector" Version="3.0.0" />
<PackageReference Include="TDengine.Connector" Version="3.0.1" />
</ItemGroup>
</Project>
......@@ -9,22 +9,29 @@ namespace TDengineExample
static void Main()
{
IntPtr conn = GetConnection();
IntPtr res = TDengine.Query(conn, "CREATE DATABASE power");
CheckRes(conn, res, "failed to create database");
res = TDengine.Query(conn, "USE power");
CheckRes(conn, res, "failed to change database");
res = TDengine.Query(conn, "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)");
CheckRes(conn, res, "failed to create stable");
var sql = "INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) " +
"d1002 USING power.meters TAGS('California.SanFrancisco', 3) VALUES('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) " +
"d1003 USING power.meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000)('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) " +
"d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000)('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)";
res = TDengine.Query(conn, sql);
CheckRes(conn, res, "failed to insert data");
int affectedRows = TDengine.AffectRows(res);
Console.WriteLine("affectedRows " + affectedRows);
TDengine.FreeResult(res);
ExitProgram(conn, 0);
try
{
IntPtr res = TDengine.Query(conn, "CREATE DATABASE power");
CheckRes(conn, res, "failed to create database");
res = TDengine.Query(conn, "USE power");
CheckRes(conn, res, "failed to change database");
res = TDengine.Query(conn, "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)");
CheckRes(conn, res, "failed to create stable");
var sql = "INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) " +
"d1002 USING power.meters TAGS('California.SanFrancisco', 3) VALUES('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) " +
"d1003 USING power.meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000)('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) " +
"d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000)('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)";
res = TDengine.Query(conn, sql);
CheckRes(conn, res, "failed to insert data");
int affectedRows = TDengine.AffectRows(res);
Console.WriteLine("affectedRows " + affectedRows);
TDengine.FreeResult(res);
}
finally
{
TDengine.Close(conn);
}
}
static IntPtr GetConnection()
......@@ -37,8 +44,7 @@ namespace TDengineExample
var conn = TDengine.Connect(host, username, password, dbname, port);
if (conn == IntPtr.Zero)
{
Console.WriteLine("Connect to TDengine failed");
Environment.Exit(0);
throw new Exception("Connect to TDengine failed");
}
else
{
......@@ -51,17 +57,10 @@ namespace TDengineExample
{
if (TDengine.ErrorNo(res) != 0)
{
Console.Write(errorMsg + " since: " + TDengine.Error(res));
ExitProgram(conn, 1);
throw new Exception($"{errorMsg} since: {TDengine.Error(res)}");
}
}
static void ExitProgram(IntPtr conn, int exitCode)
{
TDengine.Close(conn);
TDengine.Cleanup();
Environment.Exit(exitCode);
}
}
}
......
......@@ -9,45 +9,50 @@ namespace TDengineExample
static void Main()
{
conn = GetConnection();
PrepareSTable();
// 1. init and prepare
stmt = TDengine.StmtInit(conn);
if (stmt == IntPtr.Zero)
try
{
Console.WriteLine("failed to init stmt, " + TDengine.Error(stmt));
ExitProgram();
}
int res = TDengine.StmtPrepare(stmt, "INSERT INTO ? USING meters TAGS(?, ?) VALUES(?, ?, ?, ?)");
CheckStmtRes(res, "failed to prepare stmt");
PrepareSTable();
// 1. init and prepare
stmt = TDengine.StmtInit(conn);
if (stmt == IntPtr.Zero)
{
throw new Exception("failed to init stmt.");
}
int res = TDengine.StmtPrepare(stmt, "INSERT INTO ? USING meters TAGS(?, ?) VALUES(?, ?, ?, ?)");
CheckStmtRes(res, "failed to prepare stmt");
// 2. bind table name and tags
TAOS_MULTI_BIND[] tags = new TAOS_MULTI_BIND[2] { TaosMultiBind.MultiBindBinary(new string[]{"California.SanFrancisco"}), TaosMultiBind.MultiBindInt(new int?[] {2}) };
res = TDengine.StmtSetTbnameTags(stmt, "d1001", tags);
CheckStmtRes(res, "failed to bind table name and tags");
// 2. bind table name and tags
TAOS_MULTI_BIND[] tags = new TAOS_MULTI_BIND[2] { TaosMultiBind.MultiBindBinary(new string[] { "California.SanFrancisco" }), TaosMultiBind.MultiBindInt(new int?[] { 2 }) };
res = TDengine.StmtSetTbnameTags(stmt, "d1001", tags);
CheckStmtRes(res, "failed to bind table name and tags");
// 3. bind values
TAOS_MULTI_BIND[] values = new TAOS_MULTI_BIND[4] {
// 3. bind values
TAOS_MULTI_BIND[] values = new TAOS_MULTI_BIND[4] {
TaosMultiBind.MultiBindTimestamp(new long[2] { 1648432611249, 1648432611749}),
TaosMultiBind.MultiBindFloat(new float?[2] { 10.3f, 12.6f}),
TaosMultiBind.MultiBindInt(new int?[2] { 219, 218}),
TaosMultiBind.MultiBindFloat(new float?[2]{ 0.31f, 0.33f})
};
res = TDengine.StmtBindParamBatch(stmt, values);
CheckStmtRes(res, "failed to bind params");
res = TDengine.StmtBindParamBatch(stmt, values);
CheckStmtRes(res, "failed to bind params");
// 4. add batch
res = TDengine.StmtAddBatch(stmt);
CheckStmtRes(res, "failed to add batch");
// 4. add batch
res = TDengine.StmtAddBatch(stmt);
CheckStmtRes(res, "failed to add batch");
// 5. execute
res = TDengine.StmtExecute(stmt);
CheckStmtRes(res, "faild to execute");
// 5. execute
res = TDengine.StmtExecute(stmt);
CheckStmtRes(res, "faild to execute");
// 6. free
TaosMultiBind.FreeTaosBind(tags);
TaosMultiBind.FreeTaosBind(values);
}
finally
{
TDengine.Close(conn);
}
// 6. free
TaosMultiBind.FreeTaosBind(tags);
TaosMultiBind.FreeTaosBind(values);
TDengine.Close(conn);
TDengine.Cleanup();
}
static IntPtr GetConnection()
......@@ -60,8 +65,7 @@ namespace TDengineExample
var conn = TDengine.Connect(host, username, password, dbname, port);
if (conn == IntPtr.Zero)
{
Console.WriteLine("Connect to TDengine failed");
Environment.Exit(0);
throw new Exception("Connect to TDengine failed");
}
else
{
......@@ -70,8 +74,6 @@ namespace TDengineExample
return conn;
}
static void PrepareSTable()
{
IntPtr res = TDengine.Query(conn, "CREATE DATABASE power");
......@@ -90,9 +92,8 @@ namespace TDengineExample
int code = TDengine.StmtClose(stmt);
if (code != 0)
{
Console.WriteLine($"falied to close stmt, {code} reason: {TDengine.StmtErrorStr(stmt)} ");
throw new Exception($"falied to close stmt, {code} reason: {TDengine.StmtErrorStr(stmt)} ");
}
ExitProgram();
}
}
......@@ -100,16 +101,9 @@ namespace TDengineExample
{
if (TDengine.ErrorNo(res) != 0)
{
Console.WriteLine(errorMsg + " since:" + TDengine.Error(res));
ExitProgram();
throw new Exception(errorMsg + " since:" + TDengine.Error(res));
}
}
static void ExitProgram()
{
TDengine.Close(conn);
TDengine.Cleanup();
Environment.Exit(1);
}
}
}
......@@ -11,11 +11,10 @@ namespace TMQExample
{
IntPtr conn = GetConnection();
string topic = "topic_example";
Console.WriteLine($"create topic if not exist {topic} as select * from meters");
//create topic
IntPtr res = TDengine.Query(conn, $"create topic if not exists {topic} as select * from meters");
if (res == IntPtr.Zero)
if (TDengine.ErrorNo(res) != 0 )
{
throw new Exception($"create topic failed, reason:{TDengine.Error(res)}");
}
......@@ -26,7 +25,7 @@ namespace TMQExample
TDConnectUser = "root",
TDConnectPasswd = "taosdata",
MsgWithTableName = "true",
TDConnectIp = "127.0.0.1",
TDConnectIp = "127.0.0.1",
};
// create consumer
......@@ -65,7 +64,6 @@ namespace TMQExample
List<string> topics = consumer.Subscription();
topics.ForEach(t => Console.WriteLine("topic name:{0}", t));
// unsubscribe
consumer.Unsubscribe();
......@@ -73,7 +71,6 @@ namespace TMQExample
consumer.Close();
TDengine.Close(conn);
}
static IntPtr GetConnection()
......@@ -86,8 +83,7 @@ namespace TMQExample
var conn = TDengine.Connect(host, username, password, dbname, port);
if (conn == IntPtr.Zero)
{
Console.WriteLine("Connect to TDengine failed");
System.Environment.Exit(0);
throw new Exception("Connect to TDengine failed");
}
else
{
......
......@@ -9,7 +9,7 @@
</PropertyGroup>
<ItemGroup>
<PackageReference Include="TDengine.Connector" Version="3.0.0" />
<PackageReference Include="TDengine.Connector" Version="3.0.1" />
</ItemGroup>
</Project>
using System;
using TDengineWS.Impl;
namespace Examples
{
public class WSConnExample
{
static void Main(string[] args)
{
string DSN = "ws://root:taosdata@127.0.0.1:6041/test";
IntPtr wsConn = LibTaosWS.WSConnectWithDSN(DSN);
if (wsConn == IntPtr.Zero)
{
throw new Exception($"get WS connection failed,reason:{LibTaosWS.WSErrorStr(IntPtr.Zero)} code:{LibTaosWS.WSErrorNo(IntPtr.Zero)}");
}
else
{
Console.WriteLine("Establish connect success.");
}
// close connection.
LibTaosWS.WSClose(wsConn);
}
}
}
\ No newline at end of file
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<TargetFramework>net5.0</TargetFramework>
<Nullable>enable</Nullable>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="TDengine.Connector" Version="3.0.1" />
</ItemGroup>
</Project>
using System;
using TDengineWS.Impl;
namespace Examples
{
public class WSInsertExample
{
static void Main(string[] args)
{
string DSN = "ws://root:taosdata@127.0.0.1:6041/test";
IntPtr wsConn = LibTaosWS.WSConnectWithDSN(DSN);
// Assert whether the connection is valid
if (wsConn == IntPtr.Zero)
{
throw new Exception($"get WS connection failed,reason:{LibTaosWS.WSErrorStr(IntPtr.Zero)} code:{LibTaosWS.WSErrorNo(IntPtr.Zero)}");
}
else
{
Console.WriteLine("Establish connect success.");
}
string createTable = "CREATE STABLE test.meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);";
string insert = "INSERT INTO test.d1001 USING test.meters TAGS('California.SanFrancisco', 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)" +
"test.d1002 USING test.meters TAGS('California.SanFrancisco', 3) VALUES('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)" +
"test.d1003 USING test.meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000)('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) " +
"test.d1004 USING test.meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000)('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)";
IntPtr wsRes = LibTaosWS.WSQuery(wsConn, createTable);
ValidInsert("create table", wsRes);
LibTaosWS.WSFreeResult(wsRes);
wsRes = LibTaosWS.WSQuery(wsConn, insert);
ValidInsert("insert data", wsRes);
LibTaosWS.WSFreeResult(wsRes);
// close connection.
LibTaosWS.WSClose(wsConn);
}
static void ValidInsert(string desc, IntPtr wsRes)
{
int code = LibTaosWS.WSErrorNo(wsRes);
if (code != 0)
{
throw new Exception($"execute SQL failed: reason: {LibTaosWS.WSErrorStr(wsRes)}, code:{code}");
}
else
{
Console.WriteLine("{0} success affect {2} rows, cost {1} nanoseconds", desc, LibTaosWS.WSTakeTiming(wsRes), LibTaosWS.WSAffectRows(wsRes));
}
}
}
}
// Establish connect success.
// create table success affect 0 rows, cost 3717542 nanoseconds
// insert data success affect 8 rows, cost 2613637 nanoseconds
\ No newline at end of file
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<TargetFramework>net5.0</TargetFramework>
<Nullable>enable</Nullable>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="TDengine.Connector" Version="3.0.1" />
</ItemGroup>
</Project>
using System;
using TDengineWS.Impl;
using System.Collections.Generic;
using TDengineDriver;
namespace Examples
{
public class WSQueryExample
{
static void Main(string[] args)
{
string DSN = "ws://root:taosdata@127.0.0.1:6041/test";
IntPtr wsConn = LibTaosWS.WSConnectWithDSN(DSN);
if (wsConn == IntPtr.Zero)
{
throw new Exception($"get WS connection failed,reason:{LibTaosWS.WSErrorStr(IntPtr.Zero)} code:{LibTaosWS.WSErrorNo(IntPtr.Zero)}");
}
else
{
Console.WriteLine("Establish connect success.");
}
string select = "select * from test.meters";
// optional:wsRes = LibTaosWS.WSQuery(wsConn, select);
IntPtr wsRes = LibTaosWS.WSQueryTimeout(wsConn, select, 1);
// Assert whether the query executed successfully.
int code = LibTaosWS.WSErrorNo(wsRes);
if (code != 0)
{
throw new Exception($"execute SQL failed: reason: {LibTaosWS.WSErrorStr(wsRes)}, code:{code}");
}
// get meta data
List<TDengineMeta> metas = LibTaosWS.WSGetFields(wsRes);
// get retrieved data
List<object> dataSet = LibTaosWS.WSGetData(wsRes);
// do something with result.
foreach (var meta in metas)
{
Console.Write("{0} {1}({2}) \t|\t", meta.name, meta.TypeName(), meta.size);
}
Console.WriteLine("");
for (int i = 0; i < dataSet.Count;)
{
for (int j = 0; j < metas.Count; j++)
{
Console.Write("{0}\t|\t", dataSet[i]);
i++;
}
Console.WriteLine("");
}
// Free result after use.
LibTaosWS.WSFreeResult(wsRes);
// close connection.
LibTaosWS.WSClose(wsConn);
}
}
}
// Establish connect success.
// ts TIMESTAMP(8) | current FLOAT(4) | voltage INT(4) | phase FLOAT(4) | location BINARY(64) | groupid INT(4) |
// 1538548685000 | 10.8 | 223 | 0.29 | California.LosAngeles | 3 |
// 1538548686500 | 11.5 | 221 | 0.35 | California.LosAngeles | 3 |
// 1538548685500 | 11.8 | 221 | 0.28 | California.LosAngeles | 2 |
// 1538548696600 | 13.4 | 223 | 0.29 | California.LosAngeles | 2 |
// 1538548685000 | 10.3 | 219 | 0.31 | California.SanFrancisco | 2 |
// 1538548695000 | 12.6 | 218 | 0.33 | California.SanFrancisco | 2 |
// 1538548696800 | 12.3 | 221 | 0.31 | California.SanFrancisco | 2 |
// 1538548696650 | 10.3 | 218 | 0.25 | California.SanFrancisco | 3 |
\ No newline at end of file
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<TargetFramework>net5.0</TargetFramework>
<Nullable>enable</Nullable>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="TDengine.Connector" Version="3.0.1" />
</ItemGroup>
</Project>
using System;
using TDengineWS.Impl;
using TDengineDriver;
using System.Runtime.InteropServices;
namespace Examples
{
public class WSStmtExample
{
static void Main(string[] args)
{
const string DSN = "ws://root:taosdata@127.0.0.1:6041/test";
const string table = "meters";
const string database = "test";
const string childTable = "d1005";
string insert = $"insert into ? using {database}.{table} tags(?,?) values(?,?,?,?)";
const int numOfTags = 2;
const int numOfColumns = 4;
// Establish connection
IntPtr wsConn = LibTaosWS.WSConnectWithDSN(DSN);
if (wsConn == IntPtr.Zero)
{
throw new Exception($"get WS connection failed,reason:{LibTaosWS.WSErrorStr(IntPtr.Zero)} code:{LibTaosWS.WSErrorNo(IntPtr.Zero)}");
}
else
{
Console.WriteLine("Establish connect success...");
}
// init stmt
IntPtr wsStmt = LibTaosWS.WSStmtInit(wsConn);
if (wsStmt != IntPtr.Zero)
{
int code = LibTaosWS.WSStmtPrepare(wsStmt, insert);
ValidStmtStep(code, wsStmt, "WSStmtPrepare");
TAOS_MULTI_BIND[] wsTags = new TAOS_MULTI_BIND[] { WSMultiBind.WSBindNchar(new string[] { "California.SanDiego" }), WSMultiBind.WSBindInt(new int?[] { 4 }) };
code = LibTaosWS.WSStmtSetTbnameTags(wsStmt, $"{database}.{childTable}", wsTags, numOfTags);
ValidStmtStep(code, wsStmt, "WSStmtSetTbnameTags");
TAOS_MULTI_BIND[] data = new TAOS_MULTI_BIND[4];
data[0] = WSMultiBind.WSBindTimestamp(new long[] { 1538548687000, 1538548688000, 1538548689000, 1538548690000, 1538548691000 });
data[1] = WSMultiBind.WSBindFloat(new float?[] { 10.30F, 10.40F, 10.50F, 10.60F, 10.70F });
data[2] = WSMultiBind.WSBindInt(new int?[] { 223, 221, 222, 220, 219 });
data[3] = WSMultiBind.WSBindFloat(new float?[] { 0.31F, 0.32F, 0.33F, 0.35F, 0.28F });
code = LibTaosWS.WSStmtBindParamBatch(wsStmt, data, numOfColumns);
ValidStmtStep(code, wsStmt, "WSStmtBindParamBatch");
code = LibTaosWS.WSStmtAddBatch(wsStmt);
ValidStmtStep(code, wsStmt, "WSStmtAddBatch");
IntPtr stmtAffectRowPtr = Marshal.AllocHGlobal(Marshal.SizeOf(typeof(Int32)));
code = LibTaosWS.WSStmtExecute(wsStmt, stmtAffectRowPtr);
ValidStmtStep(code, wsStmt, "WSStmtExecute");
Console.WriteLine("WS STMT insert {0} rows...", Marshal.ReadInt32(stmtAffectRowPtr));
Marshal.FreeHGlobal(stmtAffectRowPtr);
LibTaosWS.WSStmtClose(wsStmt);
// Free unmanaged memory
WSMultiBind.WSFreeTaosBind(wsTags);
WSMultiBind.WSFreeTaosBind(data);
//check result with SQL "SELECT * FROM test.d1005;"
}
else
{
throw new Exception("Init STMT failed...");
}
// close connection.
LibTaosWS.WSClose(wsConn);
}
static void ValidStmtStep(int code, IntPtr wsStmt, string desc)
{
if (code != 0)
{
throw new Exception($"{desc} failed,reason: {LibTaosWS.WSErrorStr(wsStmt)}, code: {code}");
}
else
{
Console.WriteLine("{0} success...", desc);
}
}
}
}
// WSStmtPrepare success...
// WSStmtSetTbnameTags success...
// WSStmtBindParamBatch success...
// WSStmtAddBatch success...
// WSStmtExecute success...
// WS STMT insert 5 rows...
\ No newline at end of file
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<TargetFramework>net5.0</TargetFramework>
<Nullable>enable</Nullable>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="TDengine.Connector" Version="3.0.1" />
</ItemGroup>
</Project>
```csharp title="原生连接"
{{#include docs/examples/csharp/ConnectExample.cs}}
{{#include docs/examples/csharp/connect/Program.cs}}
```
:::info
C# 连接器目前只支持原生连接。
```csharp title="WebSocket 连接"
{{#include docs/examples/csharp/wsConnect/Program.cs}}
```
:::
```csharp
{{#include docs/examples/csharp/InfluxDBLineExample.cs}}
{{#include docs/examples/csharp/influxdbLine/Program.cs}}
```
```csharp
{{#include docs/examples/csharp/OptsJsonExample.cs}}
{{#include docs/examples/csharp/optsJSON/Program.cs}}
```
```csharp
{{#include docs/examples/csharp/OptsTelnetExample.cs}}
{{#include docs/examples/csharp/optsTelnet/Program.cs}}
```
```csharp
{{#include docs/examples/csharp/SQLInsertExample.cs}}
{{#include docs/examples/csharp/sqlInsert/Program.cs}}
```
```csharp
{{#include docs/examples/csharp/StmtInsertExample.cs}}
{{#include docs/examples/csharp/stmtInsert/Program.cs}}
```
```csharp
{{#include docs/examples/csharp/QueryExample.cs}}
{{#include docs/examples/csharp/query/Program.cs}}
```
```csharp
{{#include docs/examples/csharp/AsyncQueryExample.cs}}
{{#include docs/examples/csharp/asyncQuery/Program.cs}}
```
......@@ -52,7 +52,7 @@ CREATE TABLE d1004 USING meters TAGS ("California.LosAngeles", 3);
### 创建流
```sql
create stream current_stream into current_stream_output_stb as select _wstart as start, _wend as end, max(current) as max_current from meters where voltage <= 220 interval (5s);
create stream current_stream into current_stream_output_stb as select _wstart as start, _wend as wend, max(current) as max_current from meters where voltage <= 220 interval (5s);
```
### 写入数据
......@@ -71,7 +71,7 @@ insert into d1004 values("2018-10-03 14:38:06.500", 11.50000, 221, 0.35000);
```sql
taos> select start, wend, max_current from current_stream_output_stb;
start | end | max_current |
start | wend | max_current |
===========================================================================
2018-10-03 14:38:05.000 | 2018-10-03 14:38:10.000 | 10.30000 |
2018-10-03 14:38:15.000 | 2018-10-03 14:38:20.000 | 12.60000 |
......
```csharp
{{#include docs/examples/csharp/SubscribeDemo.cs}}
{{#include docs/examples/csharp/subscribe/Program.cs}}
```
\ No newline at end of file
......@@ -116,7 +116,7 @@ DSN 描述字符串基本结构如下:
- **protocol**: 显式指定以何种方式建立连接,例如:`taos+ws://localhost:6041` 指定以 Websocket 方式建立连接。
- **username/password**: 用于创建连接的用户名及密码。
- **host/port**: 指定创建连接的服务器及端口,当不指定服务器地址及端口时(`taos://`),原生连接默认为 `localhost:6030`,Websocket 连接默认为 `localhost:6041` 。
- **database**: 指定默认连接的数据库名。
- **database**: 指定默认连接的数据库名,可选参数
- **params**:其他可选参数。
一个完整的 DSN 描述字符串示例如下:
......
......@@ -17,7 +17,7 @@ import CSAsyncQuery from "../07-develop/04-query-data/_cs_async.mdx"
`TDengine.Connector` 是 TDengine 提供的 C# 语言连接器。C# 开发人员可以通过它开发存取 TDengine 集群数据的 C# 应用软件。
`TDengine.Connector` 连接器支持通过 TDengine 客户端驱动(taosc)建立与 TDengine 运行实例的连接,提供数据写入、查询、订阅、schemaless 数据写入、参数绑定接口数据写入等功能 `TDengine.Connector` 目前暂未提供 REST 连接方式,用户可以参考 [REST API](../rest-api/) 文档自行编写
`TDengine.Connector` 连接器支持通过 TDengine 客户端驱动(taosc)建立与 TDengine 运行实例的连接,提供数据写入、查询、数据订阅、schemaless 数据写入、参数绑定接口数据写入等功能。 `TDengine.Connector` 还支持 WebSocket,通过 DSN 建立 WebSocket 连接,提供数据写入、查询、参数绑定接口数据写入等功能
本文介绍如何在 Linux 或 Windows 环境中安装 `TDengine.Connector`,并通过 `TDengine.Connector` 连接 TDengine 集群,进行数据写入、查询等基本操作。
......@@ -35,12 +35,29 @@ import CSAsyncQuery from "../07-develop/04-query-data/_cs_async.mdx"
## 支持的功能特性
<Tabs defaultValue="native">
<TabItem value="native" label="原生连接">
1. 连接管理
2. 普通查询
3. 连续查询
4. 参数绑定
5. 订阅功能
5. 数据订阅(TMQ)
6. Schemaless
</TabItem>
<TabItem value="rest" label="WebSocket 连接">
1. 连接管理
2. 普通查询
3. 连续查询
4. 参数绑定
</TabItem>
</Tabs>
## 安装步骤
......@@ -79,7 +96,13 @@ dotnet add exmaple.csproj reference src/TDengine.csproj
## 建立连接
``` C#
<Tabs defaultValue="native">
<TabItem value="native" label="原生连接">
使用 host、username、password、port 等信息建立连接。
``` csharp
using TDengineDriver;
namespace TDengineExample
......@@ -109,17 +132,63 @@ namespace TDengineExample
}
}
}
```
</TabItem>
<TabItem value="rest" label="WebSocket 连接">
使用 DSN 建立 WebSocket 连接。DSN 描述字符串基本结构如下:
```text
[<protocol>]://[[<username>:<password>@]<host>:<port>][/<database>][?<p1>=<v1>[&<p2>=<v2>]]
|------------|---|-----------|-----------|------|------|------------|-----------------------|
| protocol | | username | password | host | port | database | params |
```
各部分意义见下表:
* **protocol**: 显式指定以何种方式建立连接,例如:`ws://localhost:6041` 指定以 Websocket 方式建立连接(支持 http/ws)。
* **username/password**: 用于创建连接的用户名及密码(默认`root/taosdata`)。
* **host/port**: 指定创建连接的服务器及端口,WebSocket 连接默认为 `localhost:6041` 。
* **database**: 指定默认连接的数据库名,可选参数。
* **params**:其他可选参数。
``` csharp
{{#include docs/examples/csharp/wsConnect/Program.cs}}
```
</TabItem>
</Tabs>
## 使用示例
### 写入数据
#### SQL 写入
<Tabs defaultValue="native">
<TabItem value="native" label="原生连接">
<CSInsert />
</TabItem>
<TabItem value="rest" label="WebSocket 连接">
```csharp
{{#include docs/examples/csharp/wsInsert/Program.cs}}
```
</TabItem>
</Tabs>
#### InfluxDB 行协议写入
<CSInfluxLine />
......@@ -132,12 +201,50 @@ namespace TDengineExample
<CSOpenTSDBJson />
#### 参数绑定
<Tabs defaultValue="native">
<TabItem value="native" label="原生连接">
``` csharp
{{#include docs/examples/csharp/stmtInsert/Program.cs}}
```
</TabItem>
<TabItem value="rest" label="WebSocket 连接">
```csharp
{{#include docs/examples/csharp/wsStmt/Program.cs}}
```
</TabItem>
</Tabs>
### 查询数据
#### 同步查询
<Tabs defaultValue="native">
<TabItem value="native" label="原生连接">
<CSQuery />
</TabItem>
<TabItem value="rest" label="WebSocket 连接">
```csharp
{{#include docs/examples/csharp/wsQuery/Program.cs}}
```
</TabItem>
</Tabs>
#### 异步查询
<CSAsyncQuery />
......@@ -151,12 +258,15 @@ namespace TDengineExample
| [stmt](https://github.com/taosdata/taos-connector-dotnet/tree/3.0/examples/Stmt) | 使用 TDengine.Connector 实现的参数绑定插入和查询的示例 |
| [schemaless](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/schemaless) | 使用 TDengine.Connector 实现的使用 schemaless 写入的示例 |
| [async query](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/AsyncQuery/QueryAsync.cs) | 使用 TDengine.Connector 实现的异步查询的示例 |
| [TMQ](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/TMQ/TMQ.cs) | 使用 TDengine.Connector 实现的订阅数据的示例 |
| [数据订阅(TMQ)](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/TMQ/TMQ.cs) | 使用 TDengine.Connector 实现的订阅数据的示例 |
| [Basic WebSocket Usage](https://github.com/taosdata/taos-connector-dotnet/blob/5a4a7cd0dbcda114447cdc6d0c6dedd8e84a52da/examples/WS/WebSocketSample.cs) | 使用 TDengine.Connector 的 WebSocket 基本的示例 |
| [Basic WebSocket STMT](https://github.com/taosdata/taos-connector-dotnet/blob/5a4a7cd0dbcda114447cdc6d0c6dedd8e84a52da/examples/WS/WebSocketSTMT.cs) | 使用 TDengine.Connector 的 WebSocket STMT 基本的示例 |
## 重要更新记录
| TDengine.Connector | 说明 |
|--------------------|--------------------------------|
| 3.0.1 | 支持 WebSocket 和 Cloud,以及查询、插入、参数绑定。 |
| 3.0.0 | 支持 TDengine 3.0.0.0,不兼容 2.x。新增接口TDengine.Impl.GetData(),解析查询结果。 |
| 1.0.7 | 修复 TDengine.Query()内存泄露。 |
| 1.0.6 | 修复 schemaless 在 1.0.4 和 1.0.5 中失效 bug。 |
......
---
sidebar_label: 数据类型
title: 数据类型
description: "TDengine 支持的数据类型: 时间戳、浮点型、JSON 类型等"
description: 'TDengine 支持的数据类型: 时间戳、浮点型、JSON 类型等'
---
## 时间戳
......@@ -9,64 +9,65 @@ description: "TDengine 支持的数据类型: 时间戳、浮点型、JSON 类
使用 TDengine,最重要的是时间戳。创建并插入记录、查询历史记录的时候,均需要指定时间戳。时间戳有如下规则:
- 时间格式为 `YYYY-MM-DD HH:mm:ss.MS`,默认时间分辨率为毫秒。比如:`2017-08-12 18:25:58.128`
- 内部函数 now 是客户端的当前时间
- 插入记录时,如果时间戳为 now,插入数据时使用提交这条记录的客户端的当前时间
- Epoch Time:时间戳也可以是一个长整数,表示从 UTC 时间 1970-01-01 00:00:00 开始的毫秒数。相应地,如果所在 Database 的时间精度设置为“微秒”,则长整型格式的时间戳含义也就对应于从 UTC 时间 1970-01-01 00:00:00 开始的微秒数;纳秒精度逻辑类似
- 时间可以加减,比如 now-2h,表明查询时刻向前推 2 个小时(最近 2 小时)。数字后面的时间单位可以是 b(纳秒)、u(微秒)、a(毫秒)、s(秒)、m(分)、h(小时)、d(天)、w(周)。 比如 `select * from t1 where ts > now-2w and ts <= now-1w`,表示查询两周前整整一周的数据。在指定降采样操作(down sampling)的时间窗口(interval)时,时间单位还可以使用 n (自然月) 和 y (自然年)
- 内部函数 NOW 是客户端的当前时间
- 插入记录时,如果时间戳为 NOW,插入数据时使用提交这条记录的客户端的当前时间
- Epoch Time:时间戳也可以是一个长整数,表示从 UTC 时间 1970-01-01 00:00:00 开始的毫秒数。相应地,如果所在 Database 的时间精度设置为“微秒”,则长整型格式的时间戳含义也就对应于从 UTC 时间 1970-01-01 00:00:00 开始的微秒数;纳秒精度逻辑相同
- 时间可以加减,比如 NOW-2h,表明查询时刻向前推 2 个小时(最近 2 小时)。数字后面的时间单位可以是 b(纳秒)、u(微秒)、a(毫秒)、s(秒)、m(分)、h(小时)、d(天)、w(周)。 比如 `SELECT * FROM t1 WHERE ts > NOW-2w AND ts <= NOW-1w`,表示查询两周前整整一周的数据。在指定降采样操作(Down Sampling)的时间窗口(Interval)时,时间单位还可以使用 n(自然月)和 y(自然年)
TDengine 缺省的时间戳精度是毫秒,但通过在 `CREATE DATABASE` 时传递的 PRECISION 参数也可以支持微秒和纳秒。
TDengine 缺省的时间戳精度是毫秒,但通过在 `CREATE DATABASE` 时传递的 `PRECISION` 参数也可以支持微秒和纳秒。
```sql
CREATE DATABASE db_name PRECISION 'ns';
```
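
下面给出一段示意性的 SQL(其中表 t1 及其列均为举例,假设所在库为缺省的毫秒精度),演示 NOW 与长整型时间戳的用法:

```sql
-- 示意:t1 为假设的表 (ts TIMESTAMP, val INT)
INSERT INTO t1 VALUES (NOW, 1);            -- NOW 取提交这条记录的客户端当前时间
INSERT INTO t1 VALUES (1661856000000, 2);  -- 长整数按所在库的精度解释为 Epoch 时间戳(此处为毫秒)
SELECT * FROM t1 WHERE ts > NOW-2w AND ts <= NOW-1w;  -- 查询两周前整整一周的数据
```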
## 数据类型
在 TDengine 中,普通表的数据模型中可使用以下数据类型。
| # | **类型** | **Bytes** | **说明** |
| --- | :-------: | --------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| 1 | TIMESTAMP | 8 | 时间戳。缺省精度毫秒,可支持微秒和纳秒,详细说明见上节。 |
| 2 | INT | 4 | 整型,范围 [-2^31, 2^31-1] |
| 3 | INT UNSIGNED| 4| 无符号整数,[0, 2^32-1]
| 4 | BIGINT | 8 | 长整型,范围 [-2^63, 2^63-1] |
| 5 | BIGINT UNSIGNED | 8 | 长整型,范围 [0, 2^64-1] |
| 6 | FLOAT | 4 | 浮点型,有效位数 6-7,范围 [-3.4E38, 3.4E38] |
| 7 | DOUBLE | 8 | 双精度浮点型,有效位数 15-16,范围 [-1.7E308, 1.7E308] |
| 8 | BINARY | 自定义 | 记录单字节字符串,建议只用于处理 ASCII 可见字符,中文等多字节字符需使用 nchar。 |
| 9 | SMALLINT | 2 | 短整型, 范围 [-32768, 32767] |
| 10 | SMALLINT UNSIGNED | 2| 无符号短整型,范围 [0, 65535] |
| 11 | TINYINT | 1 | 单字节整型,范围 [-128, 127] |
| 12 | TINYINT UNSIGNED | 1 | 无符号单字节整型,范围 [0, 255] |
| 13 | BOOL | 1 | 布尔型,{true, false} |
| 14 | NCHAR | 自定义 | 记录包含多字节字符在内的字符串,如中文字符。每个 nchar 字符占用 4 bytes 的存储空间。字符串两端使用单引号引用,字符串内的单引号需用转义字符 `\’`。nchar 使用时须指定字符串大小,类型为 nchar(10) 的列表示此列的字符串最多存储 10 个 nchar 字符,会固定占用 40 bytes 的空间。如果用户字符串长度超出声明长度,将会报错。 |
| 15 | JSON | | json 数据类型, 只有 tag 可以是 json 格式 |
| 16 | VARCHAR | 自定义 | BINARY类型的别名 |
| # | **类型** | **Bytes** | **说明** |
| --- | :---------------: | --------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| 1 | TIMESTAMP | 8 | 时间戳。缺省精度毫秒,可支持微秒和纳秒,详细说明见上节。 |
| 2 | INT | 4 | 整型,范围 [-2^31, 2^31-1] |
| 3 | INT UNSIGNED | 4 | 无符号整数,[0, 2^32-1] |
| 4 | BIGINT | 8 | 长整型,范围 [-2^63, 2^63-1] |
| 5 | BIGINT UNSIGNED | 8 | 长整型,范围 [0, 2^64-1] |
| 6 | FLOAT | 4 | 浮点型,有效位数 6-7,范围 [-3.4E38, 3.4E38] |
| 7 | DOUBLE | 8 | 双精度浮点型,有效位数 15-16,范围 [-1.7E308, 1.7E308] |
| 8 | BINARY | 自定义 | 记录单字节字符串,建议只用于处理 ASCII 可见字符,中文等多字节字符需使用 NCHAR |
| 9 | SMALLINT | 2 | 短整型, 范围 [-32768, 32767] |
| 10 | SMALLINT UNSIGNED | 2 | 无符号短整型,范围 [0, 65535] |
| 11 | TINYINT | 1 | 单字节整型,范围 [-128, 127] |
| 12 | TINYINT UNSIGNED | 1 | 无符号单字节整型,范围 [0, 255] |
| 13 | BOOL | 1 | 布尔型,{true, false} |
| 14 | NCHAR | 自定义 | 记录包含多字节字符在内的字符串,如中文字符。每个 NCHAR 字符占用 4 字节的存储空间。字符串两端使用单引号引用,字符串内的单引号需用转义字符 `\'`。NCHAR 使用时须指定字符串大小,类型为 NCHAR(10) 的列表示此列的字符串最多存储 10 个 NCHAR 字符,会固定占用 40 字节的空间。如果用户字符串长度超出声明长度,将会报错。 |
| 15 | JSON | | JSON 数据类型, 只有 Tag 可以是 JSON 格式 |
| 16 | VARCHAR | 自定义 | BINARY 类型的别名 |
:::note
- TDengine 对 SQL 语句中的英文字符不区分大小写,自动转化为小写执行。因此对于大小写敏感的字符串及密码,需要使用单引号将字符串引起来。
- 虽然 BINARY 类型在底层存储上支持字节型的二进制字符,但不同编程语言对二进制数据的处理方式并不保证一致,因此建议在 BINARY 类型中只存储 ASCII 可见字符,而避免存储不可见字符。多字节的数据,例如中文字符,则需要使用 NCHAR 类型进行保存。如果强行使用 BINARY 类型保存中文字符,虽然有时也能正常读写,但并不带有字符集信息,很容易出现数据乱码甚至数据损坏等情况。
- BINARY 类型理论上最长可以有 16374 字节。binary 仅支持字符串输入,字符串两端需使用单引号引用。使用时须指定大小,如 binary(20) 定义了最长为 20 个单字节字符的字符串,每个字符占 1 byte 的存储空间,总共固定占用 20 bytes 的空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\’`
- BINARY 类型理论上最长可以有 16,374 字节。BINARY 仅支持字符串输入,字符串两端需使用单引号引用。使用时须指定大小,如 BINARY(20) 定义了最长为 20 个单字节字符的字符串,每个字符占 1 字节的存储空间,总共固定占用 20 字节的空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\'`
- SQL 语句中的数值类型将依据是否存在小数点,或使用科学计数法表示,来判断数值类型是否为整型或者浮点型,因此在使用时要注意相应类型越界的情况。例如,9999999999999999999 会认为超过长整型的上边界而溢出,而 9999999999999999999.0 会被认为是有效的浮点数。
:::
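
下面是一段示意性的建表语句(表名、列名均为举例),演示上表中部分数据类型的用法:

```sql
-- 示意:组合使用 TIMESTAMP、FLOAT、INT、BOOL、BINARY、NCHAR 类型
CREATE TABLE sensor_data (
  ts      TIMESTAMP,      -- 时间戳,缺省毫秒精度
  current FLOAT,          -- 单精度浮点
  voltage INT,            -- 整型
  ok      BOOL,           -- 布尔型
  sn      BINARY(20),     -- 单字节字符串,最长 20 字节
  remark  NCHAR(30)       -- 多字节字符串,最多 30 个 NCHAR 字符,固定占用 120 字节
);
```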
## 常量
TDengine支持多个类型的常量,细节如下表:
| # | **语法** | **类型** | **说明** |
| --- | :-------: | --------- | -------------------------------------- |
| 1 | [{+ \| -}]123 | BIGINT | 整型数值的字面量的类型均为BIGINT。如果用户输入超过了BIGINT的表示范围,TDengine 按BIGINT对数值进行截断。|
| 2 | 123.45 | DOUBLE | 浮点数值的字面量的类型均为DOUBLE。TDengine依据是否存在小数点,或使用科学计数法表示,来判断数值类型是否为整型或者浮点型。|
| 3 | 1.2E3 | DOUBLE | 科学计数法的字面量的类型为DOUBLE。|
| 4 | 'abc' | BINARY | 单引号括住的内容为字符串字面值,其类型为BINARY,BINARY的size为实际的字符个数。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 \'。|
| 5 | "abc" | BINARY | 双引号括住的内容为字符串字面值,其类型为BINARY,BINARY的size为实际的字符个数。对于字符串内的双引号,可以用转义字符反斜线加单引号来表示,即 \"。 |
| 6 | TIMESTAMP {'literal' \| "literal"} | TIMESTAMP | TIMESTAMP关键字表示后面的字符串字面量需要被解释为TIMESTAMP类型。字符串需要满足YYYY-MM-DD HH:mm:ss.MS格式,其时间分辨率为当前数据库的时间分辨率。 |
| 7 | {TRUE \| FALSE} | BOOL | 布尔类型字面量。 |
| 8 | {'' \| "" \| '\t' \| "\t" \| ' ' \| " " \| NULL } | -- | 空值字面量。可以用于任意类型。|
TDengine 支持多个类型的常量,细节如下表:
| # | **语法** | **类型** | **说明** |
| --- | :-----------------------------------------------: | --------- | ----------------------------------------------------------------------------------------------------------------------------------------------------- |
| 1 | [{+ \| -}]123 | BIGINT | 整型数值的字面量的类型均为 BIGINT。如果用户输入超过了 BIGINT 的表示范围,TDengine 按 BIGINT 对数值进行截断。 |
| 2 | 123.45 | DOUBLE | 浮点数值的字面量的类型均为 DOUBLE。TDengine 依据是否存在小数点,或使用科学计数法表示,来判断数值类型是否为整型或者浮点型。 |
| 3 | 1.2E3 | DOUBLE | 科学计数法的字面量的类型为 DOUBLE。 |
| 4 | 'abc' | BINARY | 单引号括住的内容为字符串字面值,其类型为 BINARY,BINARY 的 Size 为实际的字符个数。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\'`。 |
| 5 | "abc" | BINARY | 双引号括住的内容为字符串字面值,其类型为 BINARY,BINARY 的 Size 为实际的字符个数。对于字符串内的双引号,可以用转义字符反斜线加单引号来表示,即 `\"`。 |
| 6 | TIMESTAMP {'literal' \| "literal"} | TIMESTAMP | TIMESTAMP 关键字表示后面的字符串字面量需要被解释为 TIMESTAMP 类型。字符串需要满足 YYYY-MM-DD HH:mm:ss.MS 格式,其时间分辨率为当前数据库的时间分辨率。 |
| 7 | {TRUE \| FALSE} | BOOL | 布尔类型字面量。 |
| 8 | {'' \| "" \| '\t' \| "\t" \| ' ' \| " " \| NULL } | -- | 空值字面量。可以用于任意类型。 |
:::note
- TDengine依据是否存在小数点,或使用科学计数法表示,来判断数值类型是否为整型或者浮点型,因此在使用时要注意相应类型越界的情况。例如,9999999999999999999会认为超过长整型的上边界而溢出,而9999999999999999999.0会被认为是有效的浮点数。
- TDengine 依据是否存在小数点,或使用科学计数法表示,来判断数值类型是否为整型或者浮点型,因此在使用时要注意相应类型越界的情况。例如,9999999999999999999 会认为超过长整型的上边界而溢出,而 9999999999999999999.0 会被认为是有效的浮点数。
:::
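
下面是一段示意性的查询(表 t1 及其列均为举例),演示上表中几类字面量的写法:

```sql
-- 示意:TIMESTAMP、整型、浮点(科学计数法)与字符串字面量
SELECT * FROM t1
  WHERE ts >= TIMESTAMP '2019-09-01 00:00:00.000'
    AND val > -123
    AND ratio < 1.2E3
    AND name = 'abc';
```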
......@@ -69,7 +69,7 @@ order_expr:
### 通配符
通配符 \* 可以用于代指全部列。对于普通表,结果中只有普通列。对于超级表和子表,还包含了 TAG 列。
通配符 \* 可以用于代指全部列。对于普通表和子表,结果中只有普通列。对于超级表,还包含了 TAG 列。
```sql
SELECT * FROM d1001;
......
......@@ -1233,7 +1233,7 @@ SELECT SERVER_VERSION();
### SERVER_STATUS
```sql
SELECT SERVER_VERSION();
SELECT SERVER_STATUS();
```
**说明**:返回服务端当前的状态。
......@@ -325,7 +325,7 @@ charset 的有效值是 UTF-8。
| 适用范围 | 仅服务端适用 |
| 含义 | dnode 支持的最大 vnode 数目 |
| 取值范围 | 0-4096 |
| 缺省值 | 256 |
| 缺省值 | CPU 核数的 2 倍 |
## 时间相关
......@@ -668,153 +668,154 @@ charset 的有效值是 UTF-8。
| 15 | telemetryPort | 否 | 是 |
| 16 | queryPolicy | 否 | 是 |
| 17 | querySmaOptimize | 否 | 是 |
| 18 | queryBufferSize | 是 | 是 |
| 19 | maxNumOfDistinctRes | 是 | 是 |
| 20 | minSlidingTime | 是 | 是 |
| 21 | minIntervalTime | 是 | 是 |
| 22 | countAlwaysReturnValue | 是 | 是 |
| 23 | dataDir | 是 | 是 |
| 24 | minimalDataDirGB | 是 | 是 |
| 25 | supportVnodes | 否 | 是 |
| 26 | tempDir | 是 | 是 |
| 27 | minimalTmpDirGB | 是 | 是 |
| 28 | compressMsgSize | 是 | 是 |
| 29 | compressColData | 是 | 是 |
| 30 | smlChildTableName | 是 | 是 |
| 31 | smlTagName | 是 | 是 |
| 32 | smlDataFormat | 否 | 是 |
| 33 | statusInterval | 是 | 是 |
| 34 | shellActivityTimer | 是 | 是 |
| 35 | transPullupInterval | 否 | 是 |
| 36 | mqRebalanceInterval | 否 | 是 |
| 37 | ttlUnit | 否 | 是 |
| 38 | ttlPushInterval | 否 | 是 |
| 39 | numOfTaskQueueThreads | 否 | 是 |
| 40 | numOfRpcThreads | 否 | 是 |
| 41 | numOfCommitThreads | 是 | 是 |
| 42 | numOfMnodeReadThreads | 否 | 是 |
| 43 | numOfVnodeQueryThreads | 否 | 是 |
| 44 | numOfVnodeStreamThreads | 否 | 是 |
| 45 | numOfVnodeFetchThreads | 否 | 是 |
| 46 | numOfVnodeWriteThreads | 否 | 是 |
| 47 | numOfVnodeSyncThreads | 否 | 是 |
| 48 | numOfVnodeRsmaThreads | 否 | 是 |
| 49 | numOfQnodeQueryThreads | 否 | 是 |
| 50 | numOfQnodeFetchThreads | 否 | 是 |
| 51 | numOfSnodeSharedThreads | 否 | 是 |
| 52 | numOfSnodeUniqueThreads | 否 | 是 |
| 53 | rpcQueueMemoryAllowed | 否 | 是 |
| 54 | logDir | 是 | 是 |
| 55 | minimalLogDirGB | 是 | 是 |
| 56 | numOfLogLines | 是 | 是 |
| 57 | asyncLog | 是 | 是 |
| 58 | logKeepDays | 是 | 是 |
| 59 | debugFlag | 是 | 是 |
| 60 | tmrDebugFlag | 是 | 是 |
| 61 | uDebugFlag | 是 | 是 |
| 62 | rpcDebugFlag | 是 | 是 |
| 63 | jniDebugFlag | 是 | 是 |
| 64 | qDebugFlag | 是 | 是 |
| 65 | cDebugFlag | 是 | 是 |
| 66 | dDebugFlag | 是 | 是 |
| 67 | vDebugFlag | 是 | 是 |
| 68 | mDebugFlag | 是 | 是 |
| 69 | wDebugFlag | 是 | 是 |
| 70 | sDebugFlag | 是 | 是 |
| 71 | tsdbDebugFlag | 是 | 是 |
| 72 | tqDebugFlag | 否 | 是 |
| 73 | fsDebugFlag | 是 | 是 |
| 74 | udfDebugFlag | 否 | 是 |
| 75 | smaDebugFlag | 否 | 是 |
| 76 | idxDebugFlag | 否 | 是 |
| 77 | tdbDebugFlag | 否 | 是 |
| 78 | metaDebugFlag | 否 | 是 |
| 79 | timezone | 是 | 是 |
| 80 | locale | 是 | 是 |
| 81 | charset | 是 | 是 |
| 82 | udf | 是 | 是 |
| 83 | enableCoreFile | 是 | 是 |
| 84 | arbitrator | 是 | 否 |
| 85 | numOfThreadsPerCore | 是 | 否 |
| 86 | numOfMnodes | 是 | 否 |
| 87 | vnodeBak | 是 | 否 |
| 88 | balance | 是 | 否 |
| 89 | balanceInterval | 是 | 否 |
| 90 | offlineThreshold | 是 | 否 |
| 91 | role | 是 | 否 |
| 92 | dnodeNopLoop | 是 | 否 |
| 93 | keepTimeOffset | 是 | 否 |
| 94 | rpcTimer | 是 | 否 |
| 95 | rpcMaxTime | 是 | 否 |
| 96 | rpcForceTcp | 是 | 否 |
| 97 | tcpConnTimeout | 是 | 否 |
| 98 | syncCheckInterval | 是 | 否 |
| 99 | maxTmrCtrl | 是 | 否 |
| 100 | monitorReplica | 是 | 否 |
| 101 | smlTagNullName | 是 | 否 |
| 102 | keepColumnName | 是 | 否 |
| 103 | ratioOfQueryCores | 是 | 否 |
| 104 | maxStreamCompDelay | 是 | 否 |
| 105 | maxFirstStreamCompDelay | 是 | 否 |
| 106 | retryStreamCompDelay | 是 | 否 |
| 107 | streamCompDelayRatio | 是 | 否 |
| 108 | maxVgroupsPerDb | 是 | 否 |
| 109 | maxTablesPerVnode | 是 | 否 |
| 110 | minTablesPerVnode | 是 | 否 |
| 111 | tableIncStepPerVnode | 是 | 否 |
| 112 | cache | 是 | 否 |
| 113 | blocks | 是 | 否 |
| 114 | days | 是 | 否 |
| 115 | keep | 是 | 否 |
| 116 | minRows | 是 | 否 |
| 117 | maxRows | 是 | 否 |
| 118 | quorum | 是 | 否 |
| 119 | comp | 是 | 否 |
| 120 | walLevel | 是 | 否 |
| 121 | fsync | 是 | 否 |
| 122 | replica | 是 | 否 |
| 123 | partitions | 是 | 否 |
| 124 | quorum | 是 | 否 |
| 125 | update | 是 | 否 |
| 126 | cachelast | 是 | 否 |
| 127 | maxSQLLength | 是 | 否 |
| 128 | maxWildCardsLength | 是 | 否 |
| 129 | maxRegexStringLen | 是 | 否 |
| 130 | maxNumOfOrderedRes | 是 | 否 |
| 131 | maxConnections | 是 | 否 |
| 132 | mnodeEqualVnodeNum | 是 | 否 |
| 133 | http | 是 | 否 |
| 134 | httpEnableRecordSql | 是 | 否 |
| 135 | httpMaxThreads | 是 | 否 |
| 136 | restfulRowLimit | 是 | 否 |
| 137 | httpDbNameMandatory | 是 | 否 |
| 138 | httpKeepAlive | 是 | 否 |
| 139 | enableRecordSql | 是 | 否 |
| 140 | maxBinaryDisplayWidth | 是 | 否 |
| 141 | stream | 是 | 否 |
| 142 | retrieveBlockingModel | 是 | 否 |
| 143 | tsdbMetaCompactRatio | 是 | 否 |
| 144 | defaultJSONStrType | 是 | 否 |
| 145 | walFlushSize | 是 | 否 |
| 146 | keepTimeOffset | 是 | 否 |
| 147 | flowctrl | 是 | 否 |
| 148 | slaveQuery | 是 | 否 |
| 149 | adjustMaster | 是 | 否 |
| 150 | topicBinaryLen | 是 | 否 |
| 151 | telegrafUseFieldNum | 是 | 否 |
| 152 | deadLockKillQuery | 是 | 否 |
| 153 | clientMerge | 是 | 否 |
| 154 | sdbDebugFlag | 是 | 否 |
| 155 | odbcDebugFlag | 是 | 否 |
| 156 | httpDebugFlag | 是 | 否 |
| 157 | monDebugFlag | 是 | 否 |
| 158 | cqDebugFlag | 是 | 否 |
| 159 | shortcutFlag | 是 | 否 |
| 160 | probeSeconds | 是 | 否 |
| 161 | probeKillSeconds | 是 | 否 |
| 162 | probeInterval | 是 | 否 |
| 163 | lossyColumns | 是 | 否 |
| 164 | fPrecision | 是 | 否 |
| 165 | dPrecision | 是 | 否 |
| 166 | maxRange | 是 | 否 |
| 167 | range | 是 | 否 |
| 18 | queryRsmaTolerance | 否 | 是 |
| 19 | queryBufferSize | 是 | 是 |
| 20 | maxNumOfDistinctRes | 是 | 是 |
| 21 | minSlidingTime | 是 | 是 |
| 22 | minIntervalTime | 是 | 是 |
| 23 | countAlwaysReturnValue | 是 | 是 |
| 24 | dataDir | 是 | 是 |
| 25 | minimalDataDirGB | 是 | 是 |
| 26 | supportVnodes | 否 | 是 |
| 27 | tempDir | 是 | 是 |
| 28 | minimalTmpDirGB | 是 | 是 |
| 29 | compressMsgSize | 是 | 是 |
| 30 | compressColData | 是 | 是 |
| 31 | smlChildTableName | 是 | 是 |
| 32 | smlTagName | 是 | 是 |
| 33 | smlDataFormat | 否 | 是 |
| 34 | statusInterval | 是 | 是 |
| 35 | shellActivityTimer | 是 | 是 |
| 36 | transPullupInterval | 否 | 是 |
| 37 | mqRebalanceInterval | 否 | 是 |
| 38 | ttlUnit | 否 | 是 |
| 39 | ttlPushInterval | 否 | 是 |
| 40 | numOfTaskQueueThreads | 否 | 是 |
| 41 | numOfRpcThreads | 否 | 是 |
| 42 | numOfCommitThreads | 是 | 是 |
| 43 | numOfMnodeReadThreads | 否 | 是 |
| 44 | numOfVnodeQueryThreads | 否 | 是 |
| 45 | numOfVnodeStreamThreads | 否 | 是 |
| 46 | numOfVnodeFetchThreads | 否 | 是 |
| 47 | numOfVnodeWriteThreads | 否 | 是 |
| 48 | numOfVnodeSyncThreads | 否 | 是 |
| 49 | numOfVnodeRsmaThreads | 否 | 是 |
| 50 | numOfQnodeQueryThreads | 否 | 是 |
| 51 | numOfQnodeFetchThreads | 否 | 是 |
| 52 | numOfSnodeSharedThreads | 否 | 是 |
| 53 | numOfSnodeUniqueThreads | 否 | 是 |
| 54 | rpcQueueMemoryAllowed | 否 | 是 |
| 55 | logDir | 是 | 是 |
| 56 | minimalLogDirGB | 是 | 是 |
| 57 | numOfLogLines | 是 | 是 |
| 58 | asyncLog | 是 | 是 |
| 59 | logKeepDays | 是 | 是 |
| 60 | debugFlag | 是 | 是 |
| 61 | tmrDebugFlag | 是 | 是 |
| 62 | uDebugFlag | 是 | 是 |
| 63 | rpcDebugFlag | 是 | 是 |
| 64 | jniDebugFlag | 是 | 是 |
| 65 | qDebugFlag | 是 | 是 |
| 66 | cDebugFlag | 是 | 是 |
| 67 | dDebugFlag | 是 | 是 |
| 68 | vDebugFlag | 是 | 是 |
| 69 | mDebugFlag | 是 | 是 |
| 70 | wDebugFlag | 是 | 是 |
| 71 | sDebugFlag | 是 | 是 |
| 72 | tsdbDebugFlag | 是 | 是 |
| 73 | tqDebugFlag | 否 | 是 |
| 74 | fsDebugFlag | 是 | 是 |
| 75 | udfDebugFlag | 否 | 是 |
| 76 | smaDebugFlag | 否 | 是 |
| 77 | idxDebugFlag | 否 | 是 |
| 78 | tdbDebugFlag | 否 | 是 |
| 79 | metaDebugFlag | 否 | 是 |
| 80 | timezone | 是 | 是 |
| 81 | locale | 是 | 是 |
| 82 | charset | 是 | 是 |
| 83 | udf | 是 | 是 |
| 84 | enableCoreFile | 是 | 是 |
| 85 | arbitrator | 是 | 否 |
| 86 | numOfThreadsPerCore | 是 | 否 |
| 87 | numOfMnodes | 是 | 否 |
| 88 | vnodeBak | 是 | 否 |
| 89 | balance | 是 | 否 |
| 90 | balanceInterval | 是 | 否 |
| 91 | offlineThreshold | 是 | 否 |
| 92 | role | 是 | 否 |
| 93 | dnodeNopLoop | 是 | 否 |
| 94 | keepTimeOffset | 是 | 否 |
| 95 | rpcTimer | 是 | 否 |
| 96 | rpcMaxTime | 是 | 否 |
| 97 | rpcForceTcp | 是 | 否 |
| 98 | tcpConnTimeout | 是 | 否 |
| 99 | syncCheckInterval | 是 | 否 |
| 100 | maxTmrCtrl | 是 | 否 |
| 101 | monitorReplica | 是 | 否 |
| 102 | smlTagNullName | 是 | 否 |
| 103 | keepColumnName | 是 | 否 |
| 104 | ratioOfQueryCores | 是 | 否 |
| 105 | maxStreamCompDelay | 是 | 否 |
| 106 | maxFirstStreamCompDelay | 是 | 否 |
| 107 | retryStreamCompDelay | 是 | 否 |
| 108 | streamCompDelayRatio | 是 | 否 |
| 109 | maxVgroupsPerDb | 是 | 否 |
| 110 | maxTablesPerVnode | 是 | 否 |
| 111 | minTablesPerVnode | 是 | 否 |
| 112 | tableIncStepPerVnode | 是 | 否 |
| 113 | cache | 是 | 否 |
| 114 | blocks | 是 | 否 |
| 115 | days | 是 | 否 |
| 116 | keep | 是 | 否 |
| 117 | minRows | 是 | 否 |
| 118 | maxRows | 是 | 否 |
| 119 | quorum | 是 | 否 |
| 120 | comp | 是 | 否 |
| 121 | walLevel | 是 | 否 |
| 122 | fsync | 是 | 否 |
| 123 | replica | 是 | 否 |
| 124 | partitions | 是 | 否 |
| 125 | quorum | 是 | 否 |
| 126 | update | 是 | 否 |
| 127 | cachelast | 是 | 否 |
| 128 | maxSQLLength | 是 | 否 |
| 129 | maxWildCardsLength | 是 | 否 |
| 130 | maxRegexStringLen | 是 | 否 |
| 131 | maxNumOfOrderedRes | 是 | 否 |
| 132 | maxConnections | 是 | 否 |
| 133 | mnodeEqualVnodeNum | 是 | 否 |
| 134 | http | 是 | 否 |
| 135 | httpEnableRecordSql | 是 | 否 |
| 136 | httpMaxThreads | 是 | 否 |
| 137 | restfulRowLimit | 是 | 否 |
| 138 | httpDbNameMandatory | 是 | 否 |
| 139 | httpKeepAlive | 是 | 否 |
| 140 | enableRecordSql | 是 | 否 |
| 141 | maxBinaryDisplayWidth | 是 | 否 |
| 142 | stream | 是 | 否 |
| 143 | retrieveBlockingModel | 是 | 否 |
| 144 | tsdbMetaCompactRatio | 是 | 否 |
| 145 | defaultJSONStrType | 是 | 否 |
| 146 | walFlushSize | 是 | 否 |
| 147 | keepTimeOffset | 是 | 否 |
| 148 | flowctrl | 是 | 否 |
| 149 | slaveQuery | 是 | 否 |
| 150 | adjustMaster | 是 | 否 |
| 151 | topicBinaryLen | 是 | 否 |
| 152 | telegrafUseFieldNum | 是 | 否 |
| 153 | deadLockKillQuery | 是 | 否 |
| 154 | clientMerge | 是 | 否 |
| 155 | sdbDebugFlag | 是 | 否 |
| 156 | odbcDebugFlag | 是 | 否 |
| 157 | httpDebugFlag | 是 | 否 |
| 158 | monDebugFlag | 是 | 否 |
| 159 | cqDebugFlag | 是 | 否 |
| 160 | shortcutFlag | 是 | 否 |
| 161 | probeSeconds | 是 | 否 |
| 162 | probeKillSeconds | 是 | 否 |
| 163 | probeInterval | 是 | 否 |
| 164 | lossyColumns | 是 | 否 |
| 165 | fPrecision | 是 | 否 |
| 166 | dPrecision | 是 | 否 |
| 167 | maxRange | 是 | 否 |
| 168 | range | 是 | 否 |
......@@ -6,6 +6,9 @@ description: TDengine 发布历史、Release Notes 及下载链接
import Release from "/components/ReleaseV3";
## 3.0.1.2
<Release type="tdengine" version="3.0.1.2" />
## 3.0.1.1
......
......@@ -6,6 +6,10 @@ description: taosTools 的发布历史、Release Notes 和下载链接
import Release from "/components/ReleaseV3";
## 2.2.2
<Release type="tools" version="2.2.2" />
## 2.2.0
<Release type="tools" version="2.2.0" />
......
......@@ -2,7 +2,7 @@
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include "../../../../include/client/taos.h"
#include "taos.h"
#include "lauxlib.h"
#include "lua.h"
#include "lualib.h"
......@@ -35,7 +35,7 @@ static int l_connect(lua_State *L){
}
lua_getfield(L, 1, "port");
if (lua_isnumber(L,-1)){
if (lua_isnumber(L, -1)){
port = lua_tonumber(L, -1);
//printf("port = %d\n", port);
}
......@@ -60,7 +60,6 @@ static int l_connect(lua_State *L){
lua_settop(L,0);
taos_init();
lua_newtable(L);
int table_index = lua_gettop(L);
......@@ -102,7 +101,7 @@ static int l_query(lua_State *L){
printf("failed, reason:%s\n", taos_errstr(result));
lua_pushinteger(L, -1);
lua_setfield(L, table_index, "code");
lua_pushstring(L, taos_errstr(taos));
lua_pushstring(L, taos_errstr(result));
lua_setfield(L, table_index, "error");
return 1;
......@@ -113,7 +112,6 @@ static int l_query(lua_State *L){
int rows = 0;
int num_fields = taos_field_count(result);
const TAOS_FIELD *fields = taos_fetch_fields(result);
//char temp[256];
const int affectRows = taos_affected_rows(result);
// printf(" affect rows:%d\r\n", affectRows);
......@@ -122,7 +120,7 @@ static int l_query(lua_State *L){
lua_pushinteger(L, affectRows);
lua_setfield(L, table_index, "affected");
lua_newtable(L);
while ((row = taos_fetch_row(result))) {
//printf("row index:%d\n",rows);
rows++;
......@@ -136,7 +134,7 @@ static int l_query(lua_State *L){
}
lua_pushstring(L,fields[i].name);
int32_t* length = taos_fetch_lengths(result);
switch (fields[i].type) {
case TSDB_DATA_TYPE_TINYINT:
lua_pushinteger(L,*((char *)row[i]));
......@@ -158,7 +156,8 @@ static int l_query(lua_State *L){
break;
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
lua_pushstring(L,(char *)row[i]);
//printf("type:%d, max len:%d, current len:%d\n",fields[i].type, fields[i].bytes, length[i]);
lua_pushlstring(L,(char *)row[i], length[i]);
break;
case TSDB_DATA_TYPE_TIMESTAMP:
lua_pushinteger(L,*((int64_t *)row[i]));
......@@ -166,6 +165,7 @@ static int l_query(lua_State *L){
case TSDB_DATA_TYPE_BOOL:
lua_pushinteger(L,*((char *)row[i]));
break;
case TSDB_DATA_TYPE_NULL:
default:
lua_pushnil(L);
break;
......@@ -235,112 +235,6 @@ static int l_async_query(lua_State *L){
return 1;
}
void stream_cb(void *param, TAOS_RES *result, TAOS_ROW row){
struct cb_param* p = (struct cb_param*) param;
TAOS_FIELD *fields = taos_fetch_fields(result);
int numFields = taos_num_fields(result);
// printf("\nnumfields:%d\n", numFields);
//printf("\n\r-----------------------------------------------------------------------------------\n");
lua_State *L = p->state;
lua_rawgeti(L, LUA_REGISTRYINDEX, p->callback);
lua_newtable(L);
for (int i = 0; i < numFields; ++i) {
if (row[i] == NULL) {
continue;
}
lua_pushstring(L,fields[i].name);
switch (fields[i].type) {
case TSDB_DATA_TYPE_TINYINT:
lua_pushinteger(L,*((char *)row[i]));
break;
case TSDB_DATA_TYPE_SMALLINT:
lua_pushinteger(L,*((short *)row[i]));
break;
case TSDB_DATA_TYPE_INT:
lua_pushinteger(L,*((int *)row[i]));
break;
case TSDB_DATA_TYPE_BIGINT:
lua_pushinteger(L,*((int64_t *)row[i]));
break;
case TSDB_DATA_TYPE_FLOAT:
lua_pushnumber(L,*((float *)row[i]));
break;
case TSDB_DATA_TYPE_DOUBLE:
lua_pushnumber(L,*((double *)row[i]));
break;
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
lua_pushstring(L,(char *)row[i]);
break;
case TSDB_DATA_TYPE_TIMESTAMP:
lua_pushinteger(L,*((int64_t *)row[i]));
break;
case TSDB_DATA_TYPE_BOOL:
lua_pushinteger(L,*((char *)row[i]));
break;
default:
lua_pushnil(L);
break;
}
lua_settable(L, -3);
}
lua_call(L, 1, 0);
// printf("-----------------------------------------------------------------------------------\n\r");
}
static int l_open_stream(lua_State *L){
int r = luaL_ref(L, LUA_REGISTRYINDEX);
TAOS * taos = (TAOS*)lua_topointer(L,1);
const char * sqlstr = lua_tostring(L,2);
int stime = luaL_checknumber(L,3);
lua_newtable(L);
int table_index = lua_gettop(L);
struct cb_param *p = malloc(sizeof(struct cb_param));
p->state = L;
p->callback=r;
// printf("r:%d, L:%d\n",r,L);
void * s = taos_open_stream(taos,sqlstr,stream_cb,stime,p,NULL);
if (s == NULL) {
printf("failed to open stream, reason:%s\n", taos_errstr(taos));
free(p);
lua_pushnumber(L, -1);
lua_setfield(L, table_index, "code");
lua_pushstring(L, taos_errstr(taos));
lua_setfield(L, table_index, "error");
lua_pushlightuserdata(L,NULL);
lua_setfield(L, table_index, "stream");
}else{
// printf("success to open stream\n");
lua_pushnumber(L, 0);
lua_setfield(L, table_index, "code");
lua_pushstring(L, taos_errstr(taos));
lua_setfield(L, table_index, "error");
p->stream = s;
lua_pushlightuserdata(L,p);
lua_setfield(L, table_index, "stream");//stream has different content in lua and c.
}
return 1;
}
static int l_close_stream(lua_State *L){
//TODO:get stream and free cb_param
struct cb_param *p = lua_touserdata(L,1);
taos_close_stream(p->stream);
free(p);
return 0;
}
static int l_close(lua_State *L){
TAOS *taos= (TAOS*)lua_topointer(L,1);
......@@ -367,8 +261,6 @@ static const struct luaL_Reg lib[] = {
{"query", l_query},
{"query_a",l_async_query},
{"close", l_close},
{"open_stream", l_open_stream},
{"close_stream", l_close_stream},
{NULL, NULL}
};
......
......@@ -5,7 +5,7 @@
#include <lua.h>
#include <lauxlib.h>
#include <lualib.h>
#include "taos.h"
#include <taos.h>
struct cb_param{
lua_State* state;
......@@ -60,6 +60,8 @@ static int l_connect(lua_State *L){
lua_settop(L,0);
taos_init();
lua_newtable(L);
int table_index = lua_gettop(L);
......
......@@ -9,6 +9,50 @@ local config = {
max_packet_size = 1024 * 1024
}
function dump(obj)
local getIndent, quoteStr, wrapKey, wrapVal, dumpObj
getIndent = function(level)
return string.rep("\t", level)
end
quoteStr = function(str)
return '"' .. string.gsub(str, '"', '\\"') .. '"'
end
wrapKey = function(val)
if type(val) == "number" then
return "[" .. val .. "]"
elseif type(val) == "string" then
return "[" .. quoteStr(val) .. "]"
else
return "[" .. tostring(val) .. "]"
end
end
wrapVal = function(val, level)
if type(val) == "table" then
return dumpObj(val, level)
elseif type(val) == "number" then
return val
elseif type(val) == "string" then
return quoteStr(val)
else
return tostring(val)
end
end
dumpObj = function(obj, level)
if type(obj) ~= "table" then
return wrapVal(obj)
end
level = level + 1
local tokens = {}
tokens[#tokens + 1] = "{"
for k, v in pairs(obj) do
tokens[#tokens + 1] = getIndent(level) .. wrapKey(k) .. " = " .. wrapVal(v, level) .. ","
end
tokens[#tokens + 1] = getIndent(level - 1) .. "}"
return table.concat(tokens, "\n")
end
return dumpObj(obj, 0)
end
local conn
local res = driver.connect(config)
if res.code ~=0 then
......@@ -37,7 +81,7 @@ else
print("select db--- pass.")
end
res = driver.query(conn,"create table m1 (ts timestamp, speed int,owner binary(20))")
res = driver.query(conn,"create table m1 (ts timestamp, speed int, owner binary(20), mark nchar(30))")
if res.code ~=0 then
print("create table---failed: "..res.error)
return
......@@ -45,7 +89,7 @@ else
print("create table--- pass.")
end
res = driver.query(conn,"insert into m1 values ('2019-09-01 00:00:00.001',0,'robotspace'), ('2019-09-01 00:00:00.002',1,'Hilink'),('2019-09-01 00:00:00.003',2,'Harmony')")
res = driver.query(conn,"insert into m1 values ('2019-09-01 00:00:00.001', 0, 'robotspace', '世界人民大团结万岁'), ('2019-09-01 00:00:00.002', 1, 'Hilink', '⾾⾿⿀⿁⿂⿃⿄⿅⿆⿇⿈⿉⿊⿋⿌⿍⿎⿏⿐⿑⿒⿓⿔⿕'),('2019-09-01 00:00:00.003', 2, 'Harmony', '₠₡₢₣₤₥₦₧₨₩₪₫€₭₮₯₰₱₲₳₴₵')")
if res.code ~=0 then
print("insert records failed: "..res.error)
return
......@@ -64,21 +108,25 @@ if res.code ~=0 then
return
else
if (#(res.item) == 3) then
print("select--- pass")
print("select--- pass")
print(res.item[1].mark)
print(res.item[2].mark)
print(res.item[3].mark)
else
print("select--- failed: expect 3 affected records, actually received "..#(res.item))
end
end
res = driver.query(conn,"CREATE TABLE thermometer (ts timestamp, degree double) TAGS(location binary(20), type int)")
res = driver.query(conn,"create table thermometer (ts timestamp, degree double) tags(location binary(20), type int)")
if res.code ~=0 then
print(res.error)
return
else
print("create super table--- pass")
end
res = driver.query(conn,"CREATE TABLE therm1 USING thermometer TAGS ('beijing', 1)")
res = driver.query(conn,"create table therm1 using thermometer tags ('beijing', 1)")
if res.code ~=0 then
print(res.error)
return
......@@ -86,7 +134,7 @@ else
print("create table--- pass")
end
res = driver.query(conn,"INSERT INTO therm1 VALUES ('2019-09-01 00:00:00.001', 20),('2019-09-01 00:00:00.002', 21)")
res = driver.query(conn,"insert into therm1 values ('2019-09-01 00:00:00.001', 20),('2019-09-01 00:00:00.002', 21)")
if res.code ~=0 then
print(res.error)
......@@ -99,14 +147,14 @@ else
end
end
res = driver.query(conn,"SELECT COUNT(*) count, AVG(degree) AS av, MAX(degree), MIN(degree) FROM thermometer WHERE location='beijing' or location='tianjin' GROUP BY location, type")
res = driver.query(conn,"select count(*) as cnt, avg(degree) as av, max(degree), min(degree) from thermometer where location='beijing' or location='tianjin' group by location, type")
if res.code ~=0 then
print("select from super table--- failed:"..res.error)
return
else
print("select from super table--- pass")
for i = 1, #(res.item) do
print("res:"..res.item[i].count)
print("res:"..res.item[i].cnt)
end
end
......@@ -127,30 +175,13 @@ end
driver.query_a(conn,"INSERT INTO therm1 VALUES ('2019-09-01 00:00:00.005', 100),('2019-09-01 00:00:00.006', 101),('2019-09-01 00:00:00.007', 102)", async_query_callback)
res = driver.query(conn, "create stream stream_avg_degree into avg_degree as select avg(degree) from thermometer interval(5s) sliding(1s)")
function stream_callback(t)
print("------------------------")
print("continuous query result:")
for key, value in pairs(t) do
print("key:"..key..", value:"..value)
end
end
local stream
res = driver.open_stream(conn,"SELECT COUNT(*) as count, AVG(degree) as avg, MAX(degree) as max, MIN(degree) as min FROM thermometer interval(2s) sliding(2s);)",0, stream_callback)
if res.code ~=0 then
print("open stream--- failed:"..res.error)
return
else
print("open stream--- pass")
stream = res.stream
end
print("From now on we start continous insert in an definite (infinite if you want) loop.")
print("From now on we start continous insert in an definite loop, pls wait for about 10 seconds and check stream table for result.")
local loop_index = 0
while loop_index < 30 do
while loop_index < 10 do
local t = os.time()*1000
local v = loop_index
local v = math.random(20)
res = driver.query(conn,string.format("INSERT INTO therm1 VALUES (%d, %d)",t,v))
if res.code ~=0 then
......@@ -162,7 +193,5 @@ while loop_index < 30 do
os.execute("sleep " .. 1)
loop_index = loop_index + 1
end
driver.close_stream(stream)
driver.query(conn,"DROP STREAM IF EXISTS avg_therm_s")
driver.close(conn)
......@@ -116,6 +116,7 @@ enum {
STREAM_INPUT__DATA_RETRIEVE,
STREAM_INPUT__GET_RES,
STREAM_INPUT__CHECKPOINT,
STREAM_INPUT__REF_DATA_BLOCK,
STREAM_INPUT__DESTROY,
};
......
......@@ -94,7 +94,10 @@ extern int64_t tsQueryBufferSizeBytes; // maximum allowed usage buffer size in
// query client
extern int32_t tsQueryPolicy;
extern int32_t tsQuerySmaOptimize;
extern int32_t tsQueryRsmaTolerance;
extern bool tsQueryPlannerTrace;
extern int32_t tsQueryNodeChunkSize;
extern bool tsQueryUseNodeAllocator;
// client
extern int32_t tsMinSlidingTime;
......
......@@ -1424,6 +1424,14 @@ typedef struct {
SExplainExecInfo* subplanInfo;
} SExplainRsp;
typedef struct {
SExplainRsp rsp;
uint64_t qId;
uint64_t tId;
int64_t rId;
int32_t eId;
} SExplainLocalRsp;
typedef struct STableScanAnalyzeInfo {
uint64_t totalRows;
uint64_t totalCheckedRows;
......@@ -1438,6 +1446,7 @@ typedef struct STableScanAnalyzeInfo {
int32_t tSerializeSExplainRsp(void* buf, int32_t bufLen, SExplainRsp* pRsp);
int32_t tDeserializeSExplainRsp(void* buf, int32_t bufLen, SExplainRsp* pRsp);
void tFreeSExplainRsp(SExplainRsp *pRsp);
typedef struct {
char fqdn[TSDB_FQDN_LEN]; // end point, hostname:port
......@@ -2337,6 +2346,7 @@ int32_t tSerializeSClientHbBatchReq(void* buf, int32_t bufLen, const SClientHbBa
int32_t tDeserializeSClientHbBatchReq(void* buf, int32_t bufLen, SClientHbBatchReq* pReq);
static FORCE_INLINE void tFreeClientHbBatchReq(void* pReq) {
if (pReq == NULL) return;
SClientHbBatchReq* req = (SClientHbBatchReq*)pReq;
taosArrayDestroyEx(req->reqs, tFreeClientHbReq);
taosMemoryFree(pReq);
......
......@@ -234,96 +234,100 @@
#define TK_CURRENT_USER 216
#define TK_COUNT 217
#define TK_LAST_ROW 218
#define TK_BETWEEN 219
#define TK_IS 220
#define TK_NK_LT 221
#define TK_NK_GT 222
#define TK_NK_LE 223
#define TK_NK_GE 224
#define TK_NK_NE 225
#define TK_MATCH 226
#define TK_NMATCH 227
#define TK_CONTAINS 228
#define TK_IN 229
#define TK_JOIN 230
#define TK_INNER 231
#define TK_SELECT 232
#define TK_DISTINCT 233
#define TK_WHERE 234
#define TK_PARTITION 235
#define TK_BY 236
#define TK_SESSION 237
#define TK_STATE_WINDOW 238
#define TK_SLIDING 239
#define TK_FILL 240
#define TK_VALUE 241
#define TK_NONE 242
#define TK_PREV 243
#define TK_LINEAR 244
#define TK_NEXT 245
#define TK_HAVING 246
#define TK_RANGE 247
#define TK_EVERY 248
#define TK_ORDER 249
#define TK_SLIMIT 250
#define TK_SOFFSET 251
#define TK_LIMIT 252
#define TK_OFFSET 253
#define TK_ASC 254
#define TK_NULLS 255
#define TK_ABORT 256
#define TK_AFTER 257
#define TK_ATTACH 258
#define TK_BEFORE 259
#define TK_BEGIN 260
#define TK_BITAND 261
#define TK_BITNOT 262
#define TK_BITOR 263
#define TK_BLOCKS 264
#define TK_CHANGE 265
#define TK_COMMA 266
#define TK_COMPACT 267
#define TK_CONCAT 268
#define TK_CONFLICT 269
#define TK_COPY 270
#define TK_DEFERRED 271
#define TK_DELIMITERS 272
#define TK_DETACH 273
#define TK_DIVIDE 274
#define TK_DOT 275
#define TK_EACH 276
#define TK_END 277
#define TK_FAIL 278
#define TK_FILE 279
#define TK_FOR 280
#define TK_GLOB 281
#define TK_ID 282
#define TK_IMMEDIATE 283
#define TK_IMPORT 284
#define TK_INITIALLY 285
#define TK_INSTEAD 286
#define TK_ISNULL 287
#define TK_KEY 288
#define TK_NK_BITNOT 289
#define TK_NK_SEMI 290
#define TK_NOTNULL 291
#define TK_OF 292
#define TK_PLUS 293
#define TK_PRIVILEGE 294
#define TK_RAISE 295
#define TK_REPLACE 296
#define TK_RESTRICT 297
#define TK_ROW 298
#define TK_SEMI 299
#define TK_STAR 300
#define TK_STATEMENT 301
#define TK_STRING 302
#define TK_TIMES 303
#define TK_UPDATE 304
#define TK_VALUES 305
#define TK_VARIABLE 306
#define TK_VIEW 307
#define TK_WAL 308
#define TK_CASE 219
#define TK_END 220
#define TK_WHEN 221
#define TK_THEN 222
#define TK_ELSE 223
#define TK_BETWEEN 224
#define TK_IS 225
#define TK_NK_LT 226
#define TK_NK_GT 227
#define TK_NK_LE 228
#define TK_NK_GE 229
#define TK_NK_NE 230
#define TK_MATCH 231
#define TK_NMATCH 232
#define TK_CONTAINS 233
#define TK_IN 234
#define TK_JOIN 235
#define TK_INNER 236
#define TK_SELECT 237
#define TK_DISTINCT 238
#define TK_WHERE 239
#define TK_PARTITION 240
#define TK_BY 241
#define TK_SESSION 242
#define TK_STATE_WINDOW 243
#define TK_SLIDING 244
#define TK_FILL 245
#define TK_VALUE 246
#define TK_NONE 247
#define TK_PREV 248
#define TK_LINEAR 249
#define TK_NEXT 250
#define TK_HAVING 251
#define TK_RANGE 252
#define TK_EVERY 253
#define TK_ORDER 254
#define TK_SLIMIT 255
#define TK_SOFFSET 256
#define TK_LIMIT 257
#define TK_OFFSET 258
#define TK_ASC 259
#define TK_NULLS 260
#define TK_ABORT 261
#define TK_AFTER 262
#define TK_ATTACH 263
#define TK_BEFORE 264
#define TK_BEGIN 265
#define TK_BITAND 266
#define TK_BITNOT 267
#define TK_BITOR 268
#define TK_BLOCKS 269
#define TK_CHANGE 270
#define TK_COMMA 271
#define TK_COMPACT 272
#define TK_CONCAT 273
#define TK_CONFLICT 274
#define TK_COPY 275
#define TK_DEFERRED 276
#define TK_DELIMITERS 277
#define TK_DETACH 278
#define TK_DIVIDE 279
#define TK_DOT 280
#define TK_EACH 281
#define TK_FAIL 282
#define TK_FILE 283
#define TK_FOR 284
#define TK_GLOB 285
#define TK_ID 286
#define TK_IMMEDIATE 287
#define TK_IMPORT 288
#define TK_INITIALLY 289
#define TK_INSTEAD 290
#define TK_ISNULL 291
#define TK_KEY 292
#define TK_NK_BITNOT 293
#define TK_NK_SEMI 294
#define TK_NOTNULL 295
#define TK_OF 296
#define TK_PLUS 297
#define TK_PRIVILEGE 298
#define TK_RAISE 299
#define TK_REPLACE 300
#define TK_RESTRICT 301
#define TK_ROW 302
#define TK_SEMI 303
#define TK_STAR 304
#define TK_STATEMENT 305
#define TK_STRING 306
#define TK_TIMES 307
#define TK_UPDATE 308
#define TK_VALUES 309
#define TK_VARIABLE 310
#define TK_VIEW 311
#define TK_WAL 312
#define TK_NK_SPACE 300
#define TK_NK_COMMENT 301
......
......@@ -29,6 +29,15 @@ typedef void* DataSinkHandle;
struct SRpcMsg;
struct SSubplan;
typedef int32_t (*localFetchFp)(void *, uint64_t, uint64_t, uint64_t, int64_t, int32_t, void**, SArray*);
typedef struct {
void *handle;
bool localExec;
localFetchFp fp;
SArray *explainRes;
} SLocalFetch;
typedef struct {
void* tqReader;
void* meta;
......@@ -127,7 +136,7 @@ int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, char* table
* @param handle
* @return
*/
int32_t qExecTaskOpt(qTaskInfo_t tinfo, SArray* pResList, uint64_t* useconds);
int32_t qExecTaskOpt(qTaskInfo_t tinfo, SArray* pResList, uint64_t* useconds, SLocalFetch *pLocal);
int32_t qExecTask(qTaskInfo_t tinfo, SSDataBlock** pBlock, uint64_t* useconds);
/**
......
......@@ -78,7 +78,6 @@ enum {
MAIN_SCAN = 0x0u,
REVERSE_SCAN = 0x1u, // todo remove it
REPEAT_SCAN = 0x2u, // repeat scan belongs to the master scan
MERGE_STAGE = 0x20u,
};
typedef struct SPoint1 {
......@@ -156,11 +155,6 @@ typedef struct SqlFunctionCtx {
char udfName[TSDB_FUNC_NAME_LEN];
} SqlFunctionCtx;
enum {
TEXPR_BINARYEXPR_NODE = 0x1,
TEXPR_UNARYEXPR_NODE = 0x2,
};
typedef struct tExprNode {
int32_t nodeType;
union {
......@@ -182,8 +176,9 @@ struct SScalarParam {
SColumnInfoData *columnData;
SHashObj *pHashFilter;
int32_t hashValueType;
void *param; // other parameter, such as meta handle from vnode, to extract table name/tag value
void *param; // other parameter, such as meta handle from vnode, to extract table name/tag value
int32_t numOfRows;
int32_t numOfQualified; // number of qualified elements in the final results
};
void cleanupResultRowEntry(struct SResultRowEntryInfo *pCell);
......@@ -201,8 +196,6 @@ int32_t taosGetLinearInterpolationVal(SPoint *point, int32_t outputType, SPoint
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// udf api
struct SUdfInfo;
/**
* create udfd proxy, called once in process that call doSetupUdf/callUdfxxx/doTeardownUdf
* @return error code
......@@ -226,6 +219,7 @@ int32_t udfStartUdfd(int32_t startDnodeId);
* @return
*/
int32_t udfStopUdfd();
#ifdef __cplusplus
}
#endif
......
......@@ -103,6 +103,8 @@ typedef enum ENodeType {
QUERY_NODE_STREAM_OPTIONS,
QUERY_NODE_LEFT_VALUE,
QUERY_NODE_COLUMN_REF,
QUERY_NODE_WHEN_THEN,
QUERY_NODE_CASE_WHEN,
// Statement nodes are used in parser and planner module.
QUERY_NODE_SET_OPERATOR = 100,
......@@ -275,6 +277,17 @@ typedef struct SNodeList {
SListCell* pTail;
} SNodeList;
typedef struct SNodeAllocator SNodeAllocator;
int32_t nodesInitAllocatorSet();
void nodesDestroyAllocatorSet();
int32_t nodesCreateAllocator(int64_t queryId, int32_t chunkSize, int64_t* pAllocatorId);
int32_t nodesAcquireAllocator(int64_t allocatorId);
int32_t nodesReleaseAllocator(int64_t allocatorId);
int64_t nodesMakeAllocatorWeakRef(int64_t allocatorId);
int64_t nodesReleaseAllocatorWeakRef(int64_t allocatorId);
void nodesDestroyAllocator(int64_t allocatorId);
SNode* nodesMakeNode(ENodeType type);
void nodesDestroyNode(SNode* pNode);
......
......@@ -165,7 +165,8 @@ typedef struct SVnodeModifyLogicNode {
typedef struct SExchangeLogicNode {
SLogicNode node;
int32_t srcGroupId;
int32_t srcStartGroupId;
int32_t srcEndGroupId;
} SExchangeLogicNode;
typedef struct SMergeLogicNode {
......@@ -395,11 +396,15 @@ typedef struct SDownstreamSourceNode {
uint64_t schedId;
int32_t execId;
int32_t fetchMsgType;
bool localExec;
} SDownstreamSourceNode;
typedef struct SExchangePhysiNode {
SPhysiNode node;
int32_t srcGroupId; // group id of datasource suplans
// for set operators, there will be multiple execution groups under one exchange, and the ids of these execution
// groups are consecutive
int32_t srcStartGroupId;
int32_t srcEndGroupId;
bool singleChannel;
SNodeList* pSrcEndPoints; // element is SDownstreamSource, scheduler fill by calling qSetSuplanExecutionNode
} SExchangePhysiNode;
......
......@@ -241,6 +241,19 @@ typedef struct SFillNode {
STimeWindow timeRange;
} SFillNode;
typedef struct SWhenThenNode {
SExprNode node; // QUERY_NODE_WHEN_THEN
SNode* pWhen;
SNode* pThen;
} SWhenThenNode;
typedef struct SCaseWhenNode {
SExprNode node; // QUERY_NODE_CASE_WHEN
SNode* pCase;
SNode* pElse;
SNodeList* pWhenThenList;
} SCaseWhenNode;
typedef struct SSelectStmt {
ENodeType type; // QUERY_NODE_SELECT_STMT
bool isDistinct;
......
......@@ -56,6 +56,7 @@ typedef struct SParseContext {
bool nodeOffline;
SArray* pTableMetaPos; // sql table pos => catalog data pos
SArray* pTableVgroupPos; // sql table pos => catalog data pos
int64_t allocatorId;
} SParseContext;
int32_t qParseSql(SParseContext* pCxt, SQuery** pQuery);
......
......@@ -39,6 +39,7 @@ typedef struct SPlanContext {
int32_t msgLen;
const char* pUser;
bool sysInfo;
int64_t allocatorId;
} SPlanContext;
// Create the physical plan for the query, according to the AST.
......
......@@ -52,6 +52,7 @@ typedef enum {
#define QUERY_POLICY_VNODE 1
#define QUERY_POLICY_HYBRID 2
#define QUERY_POLICY_QNODE 3
#define QUERY_POLICY_CLIENT 4
typedef struct STableComInfo {
uint8_t numOfTags; // the number of tags in schema
......@@ -269,43 +270,43 @@ extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char* msg, int32_t
#define qFatal(...) \
do { \
if (qDebugFlag & DEBUG_FATAL) { \
taosPrintLog("QRY FATAL ", DEBUG_FATAL, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); \
taosPrintLog("QRY FATAL ", DEBUG_FATAL, qDebugFlag, __VA_ARGS__); \
} \
} while (0)
#define qError(...) \
do { \
if (qDebugFlag & DEBUG_ERROR) { \
taosPrintLog("QRY ERROR ", DEBUG_ERROR, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); \
taosPrintLog("QRY ERROR ", DEBUG_ERROR, qDebugFlag, __VA_ARGS__); \
} \
} while (0)
#define qWarn(...) \
do { \
if (qDebugFlag & DEBUG_WARN) { \
taosPrintLog("QRY WARN ", DEBUG_WARN, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); \
taosPrintLog("QRY WARN ", DEBUG_WARN, qDebugFlag, __VA_ARGS__); \
} \
} while (0)
#define qInfo(...) \
do { \
if (qDebugFlag & DEBUG_INFO) { \
taosPrintLog("QRY ", DEBUG_INFO, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); \
taosPrintLog("QRY ", DEBUG_INFO, qDebugFlag, __VA_ARGS__); \
} \
} while (0)
#define qDebug(...) \
do { \
if (qDebugFlag & DEBUG_DEBUG) { \
taosPrintLog("QRY ", DEBUG_DEBUG, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); \
taosPrintLog("QRY ", DEBUG_DEBUG, qDebugFlag, __VA_ARGS__); \
} \
} while (0)
#define qTrace(...) \
do { \
if (qDebugFlag & DEBUG_TRACE) { \
taosPrintLog("QRY ", DEBUG_TRACE, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); \
taosPrintLog("QRY ", DEBUG_TRACE, qDebugFlag, __VA_ARGS__); \
} \
} while (0)
#define qDebugL(...) \
do { \
if (qDebugFlag & DEBUG_DEBUG) { \
taosPrintLongString("QRY ", DEBUG_DEBUG, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); \
taosPrintLongString("QRY ", DEBUG_DEBUG, qDebugFlag, __VA_ARGS__); \
} \
} while (0)
......
......@@ -29,6 +29,7 @@ enum {
NODE_TYPE_QNODE,
NODE_TYPE_SNODE,
NODE_TYPE_MNODE,
NODE_TYPE_CLIENT,
};
typedef struct SQWorkerCfg {
......@@ -55,7 +56,24 @@ typedef struct {
uint64_t numOfErrors;
} SQWorkerStat;
int32_t qWorkerInit(int8_t nodeType, int32_t nodeId, SQWorkerCfg *cfg, void **qWorkerMgmt, const SMsgCb *pMsgCb);
typedef struct SQWMsgInfo {
int8_t taskType;
int8_t explain;
int8_t needFetch;
} SQWMsgInfo;
typedef struct SQWMsg {
void *node;
int32_t code;
int32_t msgType;
void *msg;
int32_t msgLen;
SQWMsgInfo msgInfo;
SRpcHandleInfo connInfo;
} SQWMsg;
int32_t qWorkerInit(int8_t nodeType, int32_t nodeId, void **qWorkerMgmt, const SMsgCb *pMsgCb);
int32_t qWorkerAbortPreprocessQueryMsg(void *qWorkerMgmt, SRpcMsg *pMsg);
......@@ -77,10 +95,14 @@ int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_
int32_t qWorkerProcessDeleteMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, SDeleteRes *pRes);
void qWorkerDestroy(void **qWorkerMgmt);
void qWorkerDestroy(void **qWorkerMgmt);
int32_t qWorkerGetStat(SReadHandle *handle, void *qWorkerMgmt, SQWorkerStat *pStat);
int32_t qWorkerProcessLocalQuery(void *pMgmt, uint64_t sId, uint64_t qId, uint64_t tId, int64_t rId, int32_t eId, SQWMsg *qwMsg, SArray *explainRes);
int32_t qWorkerProcessLocalFetch(void *pMgmt, uint64_t sId, uint64_t qId, uint64_t tId, int64_t rId, int32_t eId, void** pRsp, SArray* explainRes);
#ifdef __cplusplus
}
#endif
......
......@@ -31,13 +31,17 @@ enum {
FLT_OPTION_NEED_UNIQE = 4,
};
#define FILTER_RESULT_ALL_QUALIFIED 0x1
#define FILTER_RESULT_NONE_QUALIFIED 0x2
#define FILTER_RESULT_PARTIAL_QUALIFIED 0x3
typedef struct SFilterColumnParam {
int32_t numOfCols;
SArray *pDataBlock;
} SFilterColumnParam;
extern int32_t filterInitFromNode(SNode *pNode, SFilterInfo **pinfo, uint32_t options);
extern bool filterExecute(SFilterInfo *info, SSDataBlock *pSrc, int8_t **p, SColumnDataAgg *statis, int16_t numOfCols);
extern bool filterExecute(SFilterInfo *info, SSDataBlock *pSrc, SColumnInfoData** p, SColumnDataAgg *statis, int16_t numOfCols, int32_t* pFilterResStatus);
extern int32_t filterSetDataFromSlotId(SFilterInfo *info, void *param);
extern int32_t filterSetDataFromColId(SFilterInfo *info, void *param);
extern int32_t filterGetTimeRange(SNode *pNode, STimeWindow *win, bool *isStrict);
......
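The new pFilterResStatus output lets callers distinguish blocks where all, none, or only some rows pass the filter, using the FILTER_RESULT_* codes defined above. A self-contained sketch of how such a status could be derived from a per-row qualification array (illustrative only; the real filterExecute works on SSDataBlock/SColumnInfoData):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define FILTER_RESULT_ALL_QUALIFIED     0x1
#define FILTER_RESULT_NONE_QUALIFIED    0x2
#define FILTER_RESULT_PARTIAL_QUALIFIED 0x3

static int32_t classifyFilterResult(const bool* qualified, int32_t numOfRows) {
  int32_t hits = 0;
  for (int32_t i = 0; i < numOfRows; ++i) hits += qualified[i] ? 1 : 0;
  if (hits == numOfRows) return FILTER_RESULT_ALL_QUALIFIED;   // whole block passes
  if (hits == 0)         return FILTER_RESULT_NONE_QUALIFIED;  // whole block can be dropped
  return FILTER_RESULT_PARTIAL_QUALIFIED;                      // per-row result column is needed
}

int main(void) {
  bool rows[3] = {true, false, true};
  printf("status=0x%x\n", classifyFilterResult(rows, 3));  // prints 0x3
  return 0;
}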
......@@ -64,9 +64,11 @@ typedef bool (*schedulerChkKillFp)(void* param);
typedef struct SSchedulerReq {
bool syncReq;
bool localReq;
SRequestConnInfo *pConn;
SArray *pNodeList;
SQueryPlan *pDag;
int64_t allocatorRefId;
const char *sql;
int64_t startTs;
schedulerExecFp execFp;
......
......@@ -125,6 +125,14 @@ typedef struct {
SArray* blocks; // SArray<SSDataBlock>
} SStreamDataBlock;
// ref data block, for delete
typedef struct {
int8_t type;
int64_t ver;
int32_t* dataRef;
SSDataBlock* pBlock;
} SStreamRefDataBlock;
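The dataRef counter in SStreamRefDataBlock suggests shared ownership of the underlying data block: the block is released only when the last reference is dropped. A self-contained, single-threaded sketch of that ref-counting idea (demo types only; the real code presumably uses atomic operations):

#include <stdio.h>
#include <stdlib.h>

typedef struct {
  int*  dataRef;   // shared reference counter
  void* pBlock;    // stands in for the shared SSDataBlock
} DemoRefBlock;

static void demoRetain(DemoRefBlock* pRef)  { ++(*pRef->dataRef); }

static void demoRelease(DemoRefBlock* pRef) {
  if (--(*pRef->dataRef) == 0) {  // last owner frees the shared payload
    free(pRef->pBlock);
    free(pRef->dataRef);
    printf("block freed\n");
  }
}

int main(void) {
  DemoRefBlock blk = {.dataRef = malloc(sizeof(int)), .pBlock = malloc(16)};
  *blk.dataRef = 1;    // the producer holds the first reference
  demoRetain(&blk);    // a downstream consumer takes a second one
  demoRelease(&blk);
  demoRelease(&blk);   // prints "block freed"
  return 0;
}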
typedef struct {
int8_t type;
} SStreamCheckpoint;
......@@ -339,7 +347,8 @@ static FORCE_INLINE int32_t streamTaskInput(SStreamTask* pTask, SStreamQueueItem
qDebug("task %d %p submit enqueue %p %p %p", pTask->taskId, pTask, pItem, pSubmitClone, pSubmitClone->data);
taosWriteQitem(pTask->inputQueue->queue, pSubmitClone);
// qStreamInput(pTask->exec.executor, pSubmitClone);
} else if (pItem->type == STREAM_INPUT__DATA_BLOCK || pItem->type == STREAM_INPUT__DATA_RETRIEVE) {
} else if (pItem->type == STREAM_INPUT__DATA_BLOCK || pItem->type == STREAM_INPUT__DATA_RETRIEVE ||
pItem->type == STREAM_INPUT__REF_DATA_BLOCK) {
taosWriteQitem(pTask->inputQueue->queue, pItem);
// qStreamInput(pTask->exec.executor, pItem);
} else if (pItem->type == STREAM_INPUT__CHECKPOINT) {
......@@ -492,7 +501,9 @@ typedef struct {
int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq);
int32_t tDecodeStreamRetrieveReq(SDecoder* pDecoder, SStreamRetrieveReq* pReq);
void tFreeStreamDispatchReq(SStreamDispatchReq* pReq);
void tDeleteStreamRetrieveReq(SStreamRetrieveReq* pReq);
void tDeleteStreamDispatchReq(SStreamDispatchReq* pReq);
int32_t streamSetupTrigger(SStreamTask* pTask);
......
......@@ -22,6 +22,7 @@ extern "C" {
#include "cJSON.h"
#include "tdef.h"
#include "tlrucache.h"
#include "tmsgcb.h"
extern bool gRaftDetailLog;
......@@ -153,7 +154,8 @@ typedef struct SSyncFSM {
// abstract definition of log store in raft
// SWal implements it
typedef struct SSyncLogStore {
void* data;
SLRUCache* pCache;
void* data;
// append one log entry
int32_t (*appendEntry)(struct SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry);
......
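SSyncLogStore is the abstract Raft log-store interface: an opaque data pointer (now paired with an LRU cache) plus a table of function pointers such as appendEntry, which SWal implements. A self-contained analogue of this struct-of-function-pointers pattern (demo code, not the real interface):

#include <stdio.h>

typedef struct DemoLogStore {
  void* data;  // backing storage, e.g. a WAL handle
  int   (*appendEntry)(struct DemoLogStore* pStore, const char* pEntry);
} DemoLogStore;

static int demoAppend(DemoLogStore* pStore, const char* pEntry) {
  (void)pStore;
  printf("append entry: %s\n", pEntry);
  return 0;
}

int main(void) {
  DemoLogStore store = {.data = NULL, .appendEntry = demoAppend};
  return store.appendEntry(&store, "term=1 index=42");
}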
......@@ -552,7 +552,6 @@ int32_t* taosGetErrno();
#define TSDB_CODE_PAR_VALUE_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x2653)
#define TSDB_CODE_PAR_INVALID_DELETE_WHERE TAOS_DEF_ERROR_CODE(0, 0x2655)
#define TSDB_CODE_PAR_INVALID_REDISTRIBUTE_VG TAOS_DEF_ERROR_CODE(0, 0x2656)
#define TSDB_CODE_PAR_FILL_NOT_ALLOWED_FUNC TAOS_DEF_ERROR_CODE(0, 0x2657)
#define TSDB_CODE_PAR_INVALID_WINDOW_PC TAOS_DEF_ERROR_CODE(0, 0x2658)
#define TSDB_CODE_PAR_WINDOW_NOT_ALLOWED_FUNC TAOS_DEF_ERROR_CODE(0, 0x2659)
......@@ -565,6 +564,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_PAR_INVALID_SELECTED_EXPR TAOS_DEF_ERROR_CODE(0, 0x2661)
#define TSDB_CODE_PAR_GET_META_ERROR TAOS_DEF_ERROR_CODE(0, 0x2662)
#define TSDB_CODE_PAR_NOT_UNIQUE_TABLE_ALIAS TAOS_DEF_ERROR_CODE(0, 0x2663)
#define TSDB_CODE_PAR_NOT_SUPPORT_JOIN TAOS_DEF_ERROR_CODE(0, 0x2664)
#define TSDB_CODE_PAR_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x26FF)
//planner
......
......@@ -225,7 +225,8 @@ typedef enum ELogicConditionType {
#define TSDB_APP_NAME_LEN TSDB_UNI_LEN
#define TSDB_TB_COMMENT_LEN 1025
#define TSDB_QUERY_ID_LEN 26
#define TSDB_QUERY_ID_LEN 26
#define TSDB_TRANS_OPER_LEN 16
/**
* In some scenarios uint16_t (0~65535) is used to store the row len.
......@@ -482,6 +483,7 @@ enum {
#define SNODE_HANDLE -2
#define VNODE_HANDLE -3
#define BNODE_HANDLE -4
#define CLIENT_HANDLE -5
#define TSDB_CONFIG_OPTION_LEN 32
#define TSDB_CONFIG_VALUE_LEN 64
......
......@@ -62,6 +62,7 @@ static FORCE_INLINE void taosEncryptPass_c(uint8_t *inBuf, size_t len, char *tar
tMD5Final(&context);
char buf[TSDB_PASSWORD_LEN + 1];
buf[TSDB_PASSWORD_LEN] = 0;
sprintf(buf, "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x", context.digest[0], context.digest[1],
context.digest[2], context.digest[3], context.digest[4], context.digest[5], context.digest[6],
context.digest[7], context.digest[8], context.digest[9], context.digest[10], context.digest[11],
......
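The sprintf above expands a 16-byte MD5 digest into 32 lowercase hex characters. An equivalent, self-contained loop form (the sample digest bytes are arbitrary):

#include <stdio.h>
#include <stdint.h>

int main(void) {
  uint8_t digest[16] = {0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
                        0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e};
  char hex[33];
  for (int i = 0; i < 16; ++i) {
    snprintf(hex + i * 2, 3, "%02x", digest[i]);  // two hex chars per digest byte
  }
  printf("%s\n", hex);  // 32 hex characters, same layout as the sprintf above
  return 0;
}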
#!/bin/bash
set -e
#set -x
set -v
# dockerbuild.sh
# -n [version number]
......@@ -11,8 +12,9 @@ set -e
version=""
passWord=""
verType=""
dockerLatest="n"
while getopts "hn:p:V:" arg
while getopts "hn:p:V:a:" arg
do
case $arg in
n)
......@@ -29,9 +31,15 @@ do
;;
h)
echo "Usage: `basename $0` -n [version number] "
echo " -p [password for docker hub] "
echo " -p [password for docker hub] "
echo " -V [stable |beta] "
echo " -a [y | n ] "
exit 0
;;
a)
#echo "dockerLatest=$OPTARG"
dockerLatest=$(echo $OPTARG)
;;
?) #unknown option
echo "unknown argument"
exit 1
......@@ -41,42 +49,55 @@ done
echo "version=${version}"
#docker manifest rm tdengine/tdengine
#docker manifest rm tdengine/tdengine:${version}
if [ "$verType" == "beta" ]; then
docker manifest create -a tdengine/tdengine-beta:${version} tdengine/tdengine-amd64-beta:${version} tdengine/tdengine-aarch64-beta:${version} tdengine/tdengine-aarch32-beta:${version}
docker manifest create -a tdengine/tdengine-beta:latest tdengine/tdengine-amd64-beta:latest tdengine/tdengine-aarch64-beta:latest tdengine/tdengine-aarch32-beta:latest
docker manifest rm tdengine/tdengine-beta:${version}
docker manifest rm tdengine/tdengine-beta:latest
docker manifest create -a tdengine/tdengine-beta:${version} tdengine/tdengine-amd64-beta:${version} tdengine/tdengine-aarch64-beta:${version} tdengine/tdengine-aarch32-beta:${version}
docker manifest create -a tdengine/tdengine-beta:latest tdengine/tdengine-amd64-beta:latest tdengine/tdengine-aarch64-beta:latest tdengine/tdengine-aarch32-beta:latest
docker manifest inspect tdengine/tdengine:latest
docker manifest inspect tdengine/tdengine:${version}
docker login -u tdengine -p ${passWord} #replace the docker registry username and password
docker manifest push tdengine/tdengine-beta:${version}
docker manifest push tdengine/tdengine-beta:latest
elif [ "$verType" == "stable" ]; then
docker manifest create -a tdengine/tdengine:${version} tdengine/tdengine-amd64:${version} tdengine/tdengine-aarch64:${version} tdengine/tdengine-aarch32:${version}
docker manifest create -a tdengine/tdengine:latest tdengine/tdengine-amd64:latest tdengine/tdengine-aarch64:latest tdengine/tdengine-aarch32:latest
docker manifest rm tdengine/tdengine:latest
docker manifest rm tdengine/tdengine:${version}
docker manifest create -a tdengine/tdengine:${version} tdengine/tdengine-amd64:${version} tdengine/tdengine-aarch64:${version} tdengine/tdengine-aarch32:${version}
docker manifest create -a tdengine/tdengine:latest tdengine/tdengine-amd64:latest tdengine/tdengine-aarch64:latest tdengine/tdengine-aarch32:latest
docker manifest inspect tdengine/tdengine:latest
docker manifest inspect tdengine/tdengine:${version}
docker login -u tdengine -p ${passWord} #replace the docker registry username and password
docker manifest push tdengine/tdengine:${version}
docker manifest push tdengine/tdengine:latest
else
if [ "$verType" == "stable" ]; then
verType=stable
dockerinput=TDengine-server-${version}-Linux-$cpuType.tar.gz
dockerinput_x64=TDengine-server-${version}-Linux-amd64.tar.gz
dockerim=tdengine/tdengine
dockeramd64=tdengine/tdengine-amd64
dockeraarch64=tdengine/tdengine-aarch64
dockeraarch32=tdengine/tdengine-aarch32
elif [ "$verType" == "beta" ];then
verType=beta
tagVal=ver-${version}-beta
dockerinput=TDengine-server-${version}-${verType}-Linux-$cpuType.tar.gz
dockerinput_x64=TDengine-server-${version}-${verType}-Linux-amd64.tar.gz
dockerim=tdengine/tdengine-beta
dockeramd64=tdengine/tdengine-amd64-beta
dockeraarch64=tdengine/tdengine-aarch64-beta
dockeraarch32=tdengine/tdengine-aarch32-beta
else
echo "unknow verType, nor stabel or beta"
exit 1
fi
# docker manifest create -a tdengine/${dockername}:${version} tdengine/tdengine-amd64:${version} tdengine/tdengine-aarch64:${version} tdengine/tdengine-aarch32:${version}
# docker manifest create -a tdengine/${dockername}:latest tdengine/tdengine-amd64:latest tdengine/tdengine-aarch64:latest tdengine/tdengine-aarch32:latest
username="tdengine"
# docker login -u tdengine -p ${passWord} #replace the docker registry username and password
# generate docker version
echo "generate ${dockerim}:${version}"
docker manifest create -a ${dockerim}:${version} ${dockeramd64}:${version} ${dockeraarch64}:${version}
docker manifest inspect ${dockerim}:${version}
docker manifest rm ${dockerim}:${version}
docker manifest create -a ${dockerim}:${version} ${dockeramd64}:${version} ${dockeraarch64}:${version}
docker manifest inspect ${dockerim}:${version}
docker login -u ${username} -p ${passWord}
docker manifest push ${dockerim}:${version}
# generate docker latest
echo "generate ${dockerim}:latest "
if [ ${dockerLatest} == 'y' ] ;then
echo "docker manifest create -a ${dockerim}:latest ${dockeramd64}:latest ${dockeraarch64}:latest"
docker manifest create -a ${dockerim}:latest ${dockeramd64}:latest ${dockeraarch64}:latest
docker manifest inspect ${dockerim}:latest
docker manifest rm ${dockerim}:latest
docker manifest create -a ${dockerim}:latest ${dockeramd64}:latest ${dockeraarch64}:latest
docker manifest inspect ${dockerim}:latest
docker login -u tdengine -p ${passWord} #replace the docker registry username and password
docker manifest push ${dockerim}:latest
docker pull tdengine/tdengine:latest
fi
# docker manifest push tdengine/tdengine:latest
# # how set latest version ???
......@@ -149,26 +149,4 @@ rm -rf temp1.data
if [ ${dockerLatest} == 'y' ] ;then
docker tag tdengine/tdengine-${dockername}:${version} tdengine/tdengine-${dockername}:latest
docker push tdengine/tdengine-${dockername}:latest
echo ">>>>>>>>>>>>> check whether tdengine/tdengine-${dockername}:latest has been published correctly"
docker run -d --name doctestla -p 7030-7049:6030-6049 -p 7030-7049:6030-6049/udp tdengine/tdengine-${dockername}:latest
sleep 2
curl -u root:taosdata -d 'show variables;' 127.0.0.1:7041/rest/sql > temp2.data
version_latest=` cat temp2.data |jq .data| jq '.[]' |grep "version" -A 2 -B 1 | jq ".[1]" `
echo "${version_latest}"
if [ "${version_latest}" == "\"${version}\"" ] ; then
echo "docker version is right "
else
echo "docker version is wrong "
exit 1
fi
fi
rm -rf temp2.data
if [ -n "$(docker ps -aq)" ] ;then
echo "delte docker process"
docker stop $(docker ps -aq)
docker rm $(docker ps -aq)
fi
cd ${scriptDir}
rm -f ${pkgFile}
......@@ -44,8 +44,6 @@ cmake ../../ -G "NMake Makefiles JOM" -DCMAKE_MAKE_PROGRAM=jom -DBUILD_TOOLS=tru
cmake --build .
rd /s /Q C:\TDengine
cmake --install .
for /r c:\TDengine %%i in (*.dll) do signtool sign /f D:\\123.pfx /p taosdata %%i
for /r c:\TDengine %%i in (*.exe) do signtool sign /f D:\\123.pfx /p taosdata %%i
if not %errorlevel% == 0 ( call :RUNFAILED build x64 failed & exit /b 1)
cd %package_dir%
iscc /DMyAppInstallName="%packagServerName_x64%" /DMyAppVersion="%2" /DMyAppExcludeSource="" tools\tdengine.iss /O..\release
......@@ -53,7 +51,6 @@ if not %errorlevel% == 0 ( call :RUNFAILED package %packagServerName_x64% faile
iscc /DMyAppInstallName="%packagClientName_x64%" /DMyAppVersion="%2" /DMyAppExcludeSource="taosd.exe" tools\tdengine.iss /O..\release
if not %errorlevel% == 0 ( call :RUNFAILED package %packagClientName_x64% failed & exit /b 1)
for /r ..\release %%i in (*.exe) do signtool sign /f d:\\123.pfx /p taosdata %%i
goto EXIT0
:USAGE
......
......@@ -202,8 +202,8 @@ elif [[ ${packgeName} =~ "tar" ]];then
cd ${oriInstallPath}/${originTdpPath} && tar xf ${subFile}
fi
cd ${oriInstallPath}/${originTdpPath} && tree > ${installPath}/base_${originversion}_checkfile
cd ${installPath}/${tdPath} && tree > ${installPath}/now_${version}_checkfile
cd ${oriInstallPath}/${originTdpPath} && tree -I "driver" > ${installPath}/base_${originversion}_checkfile
cd ${installPath}/${tdPath} && tree -I "driver" > ${installPath}/now_${version}_checkfile
cd ${installPath}
diff ${installPath}/base_${originversion}_checkfile ${installPath}/now_${version}_checkfile > ${installPath}/diffFile.log
......@@ -215,6 +215,7 @@ elif [[ ${packgeName} =~ "tar" ]];then
exit -1
else
echoColor G "The number and names of files are the same as previous installation packages"
rm -rf ${installPath}/diffFile.log
fi
echoColor YD "===== install Package of tar ====="
cd ${installPath}/${tdPath}
......@@ -251,6 +252,9 @@ if [[ ${packgeName} =~ "server" ]] ;then
systemctl restart taosd
fi
rm -rf ${installPath}/${packgeName}
rm -rf ${installPath}/${tdPath}/
# if ([[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "tar" ]]) || [[ ${packgeName} =~ "client" ]] ;then
# echoColor G "===== install taos-tools when package is lite or client ====="
# cd ${installPath}
......
......@@ -59,9 +59,9 @@ enum {
#define SHOW_VARIABLES_RESULT_FIELD1_LEN (TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE)
#define SHOW_VARIABLES_RESULT_FIELD2_LEN (TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE)
#define TD_RES_QUERY(res) (*(int8_t*)res == RES_TYPE__QUERY)
#define TD_RES_TMQ(res) (*(int8_t*)res == RES_TYPE__TMQ)
#define TD_RES_TMQ_META(res) (*(int8_t*)res == RES_TYPE__TMQ_META)
#define TD_RES_QUERY(res) (*(int8_t*)res == RES_TYPE__QUERY)
#define TD_RES_TMQ(res) (*(int8_t*)res == RES_TYPE__TMQ)
#define TD_RES_TMQ_META(res) (*(int8_t*)res == RES_TYPE__TMQ_META)
#define TD_RES_TMQ_METADATA(res) (*(int8_t*)res == RES_TYPE__TMQ_METADATA)
typedef struct SAppInstInfo SAppInstInfo;
......@@ -250,6 +250,8 @@ typedef struct SRequestObj {
bool inRetry;
uint32_t prevCode; // previous error code: todo refactor, add update flag for catalog
uint32_t retry;
int64_t allocatorRefId;
SQuery* pQuery;
} SRequestObj;
typedef struct SSyncQueryParam {
......
......@@ -27,6 +27,7 @@
#include "trpc.h"
#include "tsched.h"
#include "ttime.h"
#include "qworker.h"
#define TSC_VAR_NOT_RELEASE 1
#define TSC_VAR_RELEASED 0
......@@ -288,6 +289,7 @@ void *createRequest(uint64_t connId, int32_t type) {
pRequest->body.resInfo.convertUcs4 = true; // convert ucs4 by default
pRequest->type = type;
pRequest->allocatorRefId = -1;
pRequest->pDb = getDbOfConnection(pTscObj);
pRequest->pTscObj = pTscObj;
......@@ -349,6 +351,8 @@ void doDestroyRequest(void *p) {
taosArrayDestroy(pRequest->tableList);
taosArrayDestroy(pRequest->dbList);
taosArrayDestroy(pRequest->targetTableList);
qDestroyQuery(pRequest->pQuery);
nodesDestroyAllocator(pRequest->allocatorRefId);
destroyQueryExecRes(&pRequest->body.resInfo.execRes);
......@@ -411,6 +415,7 @@ void taos_init_imp(void) {
initTaskQueue();
fmFuncMgtInit();
nodesInitAllocatorSet();
clientConnRefPool = taosOpenRef(200, destroyTscObj);
clientReqRefPool = taosOpenRef(40960, doDestroyRequest);
......
(The remaining file diffs are collapsed and not shown.)