Unverified commit 9c66c81e, authored by J jiajingbin, committed by GitHub

Merge branch 'main' into fix/test_1x

# apr-util
ExternalProject_Add(aprutil-1
URL https://dlcdn.apache.org//apr/apr-util-1.6.3.tar.gz
URL_HASH SHA256=2b74d8932703826862ca305b094eef2983c27b39d5c9414442e9976a9acf1983
DOWNLOAD_NO_PROGRESS 1
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
#GIT_REPOSITORY https://github.com/apache/apr-util.git
#GIT_TAG 1.5.4
SOURCE_DIR "${TD_CONTRIB_DIR}/apr-util"
#BINARY_DIR ""
BUILD_IN_SOURCE TRUE
BUILD_ALWAYS 1
#UPDATE_COMMAND ""
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1/ --with-apr=$ENV{HOME}/.cos-local.1
#CONFIGURE_COMMAND ./configure --with-apr=/usr/local/apr
BUILD_COMMAND make
INSTALL_COMMAND make install
TEST_COMMAND ""
)
# apr
ExternalProject_Add(apr-1
URL https://dlcdn.apache.org//apr/apr-1.7.4.tar.gz
URL_HASH SHA256=a4137dd82a185076fa50ba54232d920a17c6469c30b0876569e1c2a05ff311d9
DOWNLOAD_NO_PROGRESS 1
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
#GIT_REPOSITORY https://github.com/apache/apr.git
#GIT_TAG 1.5.2
SOURCE_DIR "${TD_CONTRIB_DIR}/apr"
BUILD_IN_SOURCE TRUE
UPDATE_DISCONNECTED TRUE
BUILD_ALWAYS 1
#UPDATE_COMMAND ""
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1/ --enable-shared=no
#CONFIGURE_COMMAND ./configure
BUILD_COMMAND make
INSTALL_COMMAND make install
TEST_COMMAND ""
)
......@@ -77,6 +77,12 @@ ELSE ()
SET(TD_TAOS_TOOLS TRUE)
ENDIF ()
IF (${TD_WINDOWS})
SET(TAOS_LIB taos_static)
ELSE ()
SET(TAOS_LIB taos)
ENDIF ()
IF (TD_WINDOWS)
MESSAGE("${Yellow} set compiler flag for Windows! ${ColourReset}")
SET(COMMON_FLAGS "/w /D_WIN32 /DWIN32 /Zi /MTd")
......
......@@ -125,6 +125,16 @@ option(
ON
)
IF(${TD_LINUX})
option(
BUILD_WITH_COS
"If build with cos"
ON
)
ENDIF ()
option(
BUILD_WITH_SQLITE
"If build with sqlite"
......
......@@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "3.1.0.4.alpha")
SET(TD_VER_NUMBER "3.1.1.0.alpha")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
......
# cos
ExternalProject_Add(cos
GIT_REPOSITORY https://github.com/tencentyun/cos-c-sdk-v5.git
GIT_TAG v5.0.16
SOURCE_DIR "${TD_CONTRIB_DIR}/cos-c-sdk-v5"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
TEST_COMMAND ""
)
# curl
ExternalProject_Add(curl
URL https://curl.se/download/curl-8.2.1.tar.gz
DOWNLOAD_NO_PROGRESS 1
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
#GIT_REPOSITORY https://github.com/curl/curl.git
#GIT_TAG curl-7_88_1
SOURCE_DIR "${TD_CONTRIB_DIR}/curl"
BUILD_IN_SOURCE TRUE
BUILD_ALWAYS 1
#UPDATE_COMMAND ""
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 --without-ssl --enable-shared=no --disable-ldap --disable-ldaps --without-brotli --without-zstd
#CONFIGURE_COMMAND ./configure --without-ssl
BUILD_COMMAND make
INSTALL_COMMAND make install
TEST_COMMAND ""
)
# mxml
ExternalProject_Add(mxml
GIT_REPOSITORY https://github.com/michaelrsweet/mxml.git
GIT_TAG v2.12
SOURCE_DIR "${TD_CONTRIB_DIR}/mxml"
#BINARY_DIR ""
BUILD_IN_SOURCE TRUE
#UPDATE_COMMAND ""
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 --enable-shared=no
#CONFIGURE_COMMAND ./configure
BUILD_COMMAND make
INSTALL_COMMAND make install
TEST_COMMAND ""
)
......@@ -6,6 +6,39 @@ function(cat IN_FILE OUT_FILE)
file(APPEND ${OUT_FILE} "${CONTENTS}")
endfunction(cat IN_FILE OUT_FILE)
if(${TD_LINUX})
set(CONTRIB_TMP_FILE3 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in3")
configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
if(${BUILD_WITH_COS})
file(MAKE_DIRECTORY $ENV{HOME}/.cos-local.1/)
cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
endif(${BUILD_WITH_COS})
configure_file(${CONTRIB_TMP_FILE3} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
execute_process(COMMAND "${CMAKE_COMMAND}" --build .
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
set(CONTRIB_TMP_FILE2 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in2")
configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2})
if(${BUILD_WITH_COS})
cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2})
endif(${BUILD_WITH_COS})
configure_file(${CONTRIB_TMP_FILE2} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
execute_process(COMMAND "${CMAKE_COMMAND}" --build .
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
endif(${TD_LINUX})
set(CONTRIB_TMP_FILE "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in")
configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
......@@ -122,6 +155,16 @@ if(${BUILD_WITH_SQLITE})
cat("${TD_SUPPORT_DIR}/sqlite_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif(${BUILD_WITH_SQLITE})
# cos
if(${BUILD_WITH_COS})
#cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
#cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
#cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
#cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
add_definitions(-DUSE_COS)
endif(${BUILD_WITH_COS})
# lucene
if(${BUILD_WITH_LUCENE})
cat("${TD_SUPPORT_DIR}/lucene_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
......@@ -347,6 +390,31 @@ if (${BUILD_WITH_ROCKSDB})
endif()
endif()
# cos
if(${BUILD_WITH_COS})
if(${TD_LINUX})
set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local.1)
#ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a)
option(ENABLE_TEST "Enable the tests" OFF)
INCLUDE_DIRECTORIES($ENV{HOME}/.cos-local.1/include)
MESSAGE("$ENV{HOME}/.cos-local.1/include")
set(CMAKE_BUILD_TYPE debug)
set(ORIG_CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME})
set(CMAKE_PROJECT_NAME cos_c_sdk)
add_subdirectory(cos-c-sdk-v5 EXCLUDE_FROM_ALL)
target_include_directories(
cos_c_sdk
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/cos-c-sdk-v5/cos_c_sdk>
)
set(CMAKE_PROJECT_NAME ${ORIG_CMAKE_PROJECT_NAME})
else()
endif(${TD_LINUX})
endif(${BUILD_WITH_COS})
# lucene
# To support build on ubuntu: sudo apt-get install libboost-all-dev
if(${BUILD_WITH_LUCENE})
......
......@@ -3,6 +3,11 @@ if(${BUILD_WITH_ROCKSDB})
add_subdirectory(rocksdb)
endif(${BUILD_WITH_ROCKSDB})
# cos
if(${BUILD_WITH_COS})
add_subdirectory(cos)
endif(${BUILD_WITH_COS})
if(${BUILD_WITH_LUCENE})
add_subdirectory(lucene)
endif(${BUILD_WITH_LUCENE})
......
add_executable(cosTest "")
target_sources(cosTest
PRIVATE
"${CMAKE_CURRENT_SOURCE_DIR}/main.c"
)
#find_path(APR_INCLUDE_DIR apr-1/apr_time.h)
#find_path(APR_UTIL_INCLUDE_DIR apr/include/apr-1/apr_md5.h)
#find_path(MINIXML_INCLUDE_DIR mxml.h)
#find_path(CURL_INCLUDE_DIR curl/curl.h)
#include_directories (${MINIXML_INCLUDE_DIR})
#include_directories (${CURL_INCLUDE_DIR})
FIND_PROGRAM(APR_CONFIG_BIN NAMES apr-config apr-1-config PATHS /usr/bin /usr/local/bin /usr/local/apr/bin/)
#FIND_PROGRAM(APU_CONFIG_BIN NAMES apu-config apu-1-config PATHS /usr/bin /usr/local/bin /usr/local/apr/bin/)
IF (APR_CONFIG_BIN)
EXECUTE_PROCESS(
COMMAND ${APR_CONFIG_BIN} --includedir
OUTPUT_VARIABLE APR_INCLUDE_DIR
OUTPUT_STRIP_TRAILING_WHITESPACE
)
ENDIF()
#IF (APU_CONFIG_BIN)
# EXECUTE_PROCESS(
# COMMAND ${APU_CONFIG_BIN} --includedir
# OUTPUT_VARIABLE APR_UTIL_INCLUDE_DIR
# OUTPUT_STRIP_TRAILING_WHITESPACE
# )
#ENDIF()
include_directories (${APR_INCLUDE_DIR})
#include_directories (${APR_UTIL_INCLUDE_DIR})
target_include_directories(
cosTest
PUBLIC "${TD_SOURCE_DIR}/contrib/cos-c-sdk-v5/cos_c_sdk"
)
#find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/)
#find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/)
#find_library(MINIXML_LIBRARY mxml)
#find_library(CURL_LIBRARY curl)
target_link_libraries(cosTest cos_c_sdk)
target_link_libraries(cosTest apr-1)
target_link_libraries(cosTest aprutil-1)
target_link_libraries(cosTest mxml)
target_link_libraries(cosTest curl)
......@@ -24,24 +24,19 @@ By subscribing to a topic, a consumer can obtain the latest data in that topic i
To implement these features, TDengine indexes its write-ahead log (WAL) file for fast random access and provides configurable methods for replacing and retaining this file. You can define a retention period and size for this file. For information, see the CREATE DATABASE statement. In this way, the WAL file is transformed into a persistent storage engine that remembers the order in which events occur. However, note that configuring an overly long retention period for your WAL files makes database compression inefficient. TDengine then uses the WAL file instead of the time-series database as its storage engine for queries in the form of topics. TDengine reads the data from the WAL file; uses a unified query engine instance to perform filtering, transformations, and other operations; and finally pushes the data to consumers.
Tips (using the C interface as an example):
- A consumption group consumes all data under the same topic, and different consumption groups are independent of each other;
- A consumption group consumes all vgroups of the same topic. The group can be composed of multiple consumers, but a vgroup is consumed by only one consumer; if the number of consumers exceeds the number of vgroups, the excess consumers do not consume data;
- On the server side, only one offset is saved for each vgroup. The offsets of each vgroup are monotonically increasing but not necessarily continuous, and there is no correlation between the offsets of different vgroups;
- Each poll returns one result block from the server. The block belongs to one vgroup and may contain data from multiple WAL versions; the offset of the first record in the block can be obtained through tmq_get_vgroup_offset;
- If a consumer group has never committed an offset, its member consumers start consuming from the value of the parameter auto.offset.reset when they restart and pull data again. Within a consumer's lifecycle, the client locally records the offset of the most recently pulled data and does not pull duplicate data;
- If a consumer terminates abnormally (without calling tmq_close), its consumer group rebalance is triggered after about 12 seconds; the consumer's status on the server changes to LOST, and after about 1 day the consumer is automatically deleted. If a consumer exits normally, it is deleted after exiting. When a new consumer is added, rebalance is triggered after about 2 seconds and the consumer's status on the server changes to ready;
- A consumer group rebalance reassigns vgroups to all consumer members of the group that are in the ready state; consumers can only perform assignment/seek/commit/poll operations on the vgroups they are responsible for;
- Consumers can call tmq_position to obtain the offset of the current consumption, seek to a specified offset, and consume again;
- Seek points the position to the specified offset without performing a commit. Once the seek succeeds, poll fetches data from the specified offset onward;
- Position returns the current consumption position, which is the position to be fetched next, not the position already consumed;
- Commit submits the consumption position. Without arguments, it commits the current consumption position (the position to be fetched next, not the one already consumed); with arguments, it commits the position given in the arguments (i.e. the position to resume from after the next exit and restart);
- Seek sets the consumer's consumption position; wherever you seek, that is what position returns, and both refer to the position to be fetched next;
- Seek does not affect commit, and commit does not affect seek; they are independent of each other and are two different concepts;
- The begin field is the offset of the first record in the WAL, and the end field is the offset of the last record in the WAL + 1;
- Before the seek operation, tmq_get_topic_assignment must be called. The assignment interface obtains the consumer's vgroup IDs and offset ranges. The seek operation checks whether the vgroup ID and offset are valid and reports an error if they are not;
- Due to the WAL expiration and deletion mechanism, even if a seek succeeds, the offset may already have expired when data is polled. If the poll offset is smaller than the minimum WAL version number, consumption starts from the minimum WAL version number;
- The tmq_get_vgroup_offset interface obtains the offset of the first record in the result block that contains the record. When seeking to this offset, all data in the block is consumed. Refer to the fourth point;
- Data subscription consumes data from the WAL. If some WAL files are deleted according to the WAL retention policy, the deleted data can no longer be consumed. You therefore need to set a reasonable value for the `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` parameter when creating the database and make sure your application consumes the data in a timely manner so that no data is lost. This behavior is similar to Kafka and other widely used message queue products.
1. A consumption group consumes all data under the same topic, and different consumption groups are independent of each other;
2. A consumption group consumes all vgroups of the same topic. The group can be composed of multiple consumers, but a vgroup is consumed by only one consumer; if the number of consumers exceeds the number of vgroups, the excess consumers do not consume data;
3. On the server side, only one offset is saved for each vgroup. The offsets of each vgroup are monotonically increasing but not necessarily continuous, and there is no correlation between the offsets of different vgroups;
4. Each poll returns one result block from the server. The block belongs to one vgroup and may contain data from multiple WAL versions; the offset of the first record in the block can be obtained through tmq_get_vgroup_offset;
5. If a consumer group has never committed an offset, its member consumers start consuming from the value of the parameter auto.offset.reset when they restart and pull data again. Within a consumer's lifecycle, the client locally records the offset of the most recently pulled data and does not pull duplicate data;
6. If a consumer terminates abnormally (without calling tmq_close), its consumer group rebalance is triggered after about 12 seconds; the consumer's status on the server changes to LOST, and after about 1 day the consumer is automatically deleted. If a consumer exits normally, it is deleted after exiting. When a new consumer is added, rebalance is triggered after about 2 seconds and the consumer's status on the server changes to ready;
7. A consumer group rebalance reassigns vgroups to all consumer members of the group that are in the ready state; consumers can only perform assignment/seek/commit/poll operations on the vgroups they are responsible for;
8. Consumers can call tmq_position to obtain the offset of the current consumption, seek to a specified offset, and consume again;
9. Seek points the position to the specified offset without performing a commit. Once the seek succeeds, poll fetches data from the specified offset onward;
10. Before the seek operation, tmq_get_topic_assignment must be called. The assignment interface obtains the consumer's vgroup IDs and offset ranges. The seek operation checks whether the vgroup ID and offset are valid and reports an error if they are not;
11. Due to the WAL expiration and deletion mechanism, even if a seek succeeds, the offset may already have expired when data is polled. If the poll offset is smaller than the minimum WAL version number, consumption starts from the minimum WAL version number;
12. The tmq_get_vgroup_offset interface obtains the offset of the first record in the result block that contains the record. When seeking to this offset, all data in the block is consumed. Refer to the fourth point;
13. Data subscription consumes data from the WAL. If some WAL files are deleted according to the WAL retention policy, the deleted data can no longer be consumed. You therefore need to set a reasonable value for the `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` parameter when creating the database and make sure your application consumes the data in a timely manner so that no data is lost. This behavior is similar to Kafka and other widely used message queue products.
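To make points 8-10 concrete, here is a minimal C sketch (not part of this PR) that rewinds one vgroup of an already-subscribed consumer. The consumer handle `tmq` and the topic name `topic_meters` are placeholder assumptions; the function names and the `tmq_topic_assignment` fields come from the C API listed later in this document.

```c
#include <inttypes.h>
#include <stdio.h>
#include "taos.h"

// Rewind the first assigned vgroup of `topic` to its earliest retained offset
// and report where the next poll would start.
static void rewind_first_vgroup(tmq_t *tmq, const char *topic) {
  tmq_topic_assignment *assignment = NULL;
  int32_t numOfAssignment = 0;

  // Point 10: assignment must be fetched before seek; it returns the vgroup IDs
  // and the valid [begin, end) offset range of each vgroup.
  int32_t code = tmq_get_topic_assignment(tmq, topic, &assignment, &numOfAssignment);
  if (code != 0 || numOfAssignment == 0) {
    fprintf(stderr, "get assignment failed: %s\n", tmq_err2str(code));
    return;
  }

  // Point 9: seek only moves the position; it does not commit anything.
  code = tmq_offset_seek(tmq, topic, assignment[0].vgId, assignment[0].begin);
  if (code != 0) {
    fprintf(stderr, "seek failed: %s\n", tmq_err2str(code));
  } else {
    // Point 8: position() reports the offset that the next poll will fetch.
    int64_t next = tmq_position(tmq, topic, assignment[0].vgId);
    printf("vgId %d: next poll starts at offset %" PRId64 "\n",
           (int)assignment[0].vgId, next);
  }

  tmq_free_assignment(assignment);
}
```

A subsequent tmq_consumer_poll on the same consumer would then re-deliver data from the beginning of that vgroup's retained WAL range.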
## Data Schema and API
......@@ -104,8 +99,6 @@ The related schemas and APIs in various languages are described as follows:
DLL_EXPORT const char *tmq_err2str(int32_t code);
```
For more information, see [C/C++ Connector](/reference/connector/cpp).
The following example is based on the smart meter table described in Data Models. For complete sample code, see the C language section below.
</TabItem>
......@@ -120,7 +113,19 @@ Set<String> subscription() throws SQLException;
ConsumerRecords<V> poll(Duration timeout) throws SQLException;
Set<TopicPartition> assignment() throws SQLException;
long position(TopicPartition partition) throws SQLException;
Map<TopicPartition, Long> position(String topic) throws SQLException;
Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;
Map<TopicPartition, OffsetAndMetadata> committed(Set<TopicPartition> partitions) throws SQLException;
void seek(TopicPartition partition, long offset) throws SQLException;
void seekToBeginning(Collection<TopicPartition> partitions) throws SQLException;
void seekToEnd(Collection<TopicPartition> partitions) throws SQLException;
void commitSync() throws SQLException;
void commitSync(Map<TopicPartition, OffsetAndMetadata> offsets) throws SQLException;
void close() throws SQLException;
```
......
......@@ -887,4 +887,4 @@ The `pycumsum` function finds the cumulative sum for all data in the input colum
</details>
## Manage and Use UDF
You need to add UDF to TDengine before using it in SQL queries. For more information about how to manage UDF and how to invoke UDF, please see [Manage and Use UDF](../12-taos-sql/26-udf.md).
You need to add UDF to TDengine before using it in SQL queries. For more information about how to manage UDF and how to invoke UDF, please see [Manage and Use UDF](../taos-sql/udf/).
......@@ -62,12 +62,13 @@ serverPort 6030
For all the dnodes in a TDengine cluster, the parameters below must be configured identically; any node whose configuration differs from the dnodes already in the cluster cannot join the cluster.
| **#** | **Parameter** | **Definition** |
| ----- | ------------------ | ------------------------------------------- |
| 1 | statusInterval | The interval by which dnode reports its status to mnode |
| 2 | timezone | Timezone |
| 3 | locale | System region and encoding |
| 4 | charset | Character set |
| **#** | **Parameter** | **Definition** |
| ----- | ---------------- | ----------------------------------------------------------------------------- |
| 1 | statusInterval | The interval by which dnode reports its status to mnode |
| 2 | timezone | Timezone |
| 3 | locale | System region and encoding |
| 4 | charset | Character set |
| 5 | ttlChangeOnWrite | Whether the ttl expiration time changes with the table modification operation |
## Start Cluster
......@@ -97,7 +98,7 @@ Then, on the first dnode i.e. h1.tdengine.com in our example, use TDengine CLI `
CREATE DNODE "h2.taos.com:6030";
````
This adds the end point of the new dnode (from Step 4) into the end point list of the cluster. In the command "fqdn:port" should be quoted using double quotes. Change `"h2.taos.com:6030"` to the end point of your new dnode.
This adds the end point of the new dnode (from Step 4) into the end point list of the cluster. In the command "fqdn:port" should be quoted using double quotes. Change `"h2.taos.com:6030"` to the end point of your new dnode.
Then on the first dnode h1.tdengine.com, execute `show dnodes` in `taos`
......
......@@ -43,6 +43,8 @@ In TDengine, the data types below can be used when specifying a column or tag.
| 15 | JSON | | JSON type can only be used on tags. A tag of json type is excluded with any other tags of any other type. |
| 16 | VARCHAR | User-defined | Alias of BINARY |
| 17 | GEOMETRY | User-defined | Geometry |
| 18 | VARBINARY | User-defined | Binary data with variable length |
:::note
- Each row of the table cannot be longer than 48KB (64KB since version 3.0.5.0) (note that each BINARY/NCHAR/GEOMETRY column takes up an additional 2 bytes of storage space).
......@@ -57,7 +59,7 @@ In TDengine, the data types below can be used when specifying a column or tag.
| 3 | POLYGON((1.0 1.0, 2.0 2.0, 1.0 1.0)) | 13+3*16 | 13+4094*16 | +16 |
- Numeric values in SQL statements will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used, so attention must be paid to avoid overflow. For example, 9999999999999999999 will be considered as overflow because it exceeds the upper limit of long integer, but 9999999999999999999.0 will be considered as a legal float number.
- VARBINARY is a data type that stores binary data, with a maximum length of 65517 bytes for data columns and 16382 bytes for tag columns. Binary data can be written through SQL or schemaless writing (in which case it needs to be converted to a string starting with \x), or through the parameter binding (stmt) interface (which can use binary data directly). Values are displayed as hexadecimal strings starting with \x.
:::
## Constants
......
......@@ -9,7 +9,7 @@ description: This document describes how to query data in TDengine.
```sql
SELECT {DATABASE() | CLIENT_VERSION() | SERVER_VERSION() | SERVER_STATUS() | NOW() | TODAY() | TIMEZONE() | CURRENT_USER() | USER() }
SELECT [DISTINCT] select_list
SELECT [hints] [DISTINCT] [TAGS] select_list
from_clause
[WHERE condition]
[partition_by_clause]
......@@ -21,6 +21,11 @@ SELECT [DISTINCT] select_list
[LIMIT limit_val [OFFSET offset_val]]
[>> export_file]
hints: /*+ [hint([hint_param_list])] [hint([hint_param_list])] */
hint:
BATCH_SCAN | NO_BATCH_SCAN
select_list:
select_expr [, select_expr] ...
......@@ -70,6 +75,29 @@ order_expr:
{expr | position | c_alias} [DESC | ASC] [NULLS FIRST | NULLS LAST]
```
## Hints
Hints are a means of user control over query optimization for individual statements. Hints are ignored automatically if they are not applicable to the current query statement. The specific rules are as follows:
- Hints syntax starts with `/*+` and ends with `*/`; spaces are allowed before or after.
- The hints syntax can only follow the SELECT keyword.
- A hints clause can contain multiple hints, separated by spaces. When multiple hints conflict or are identical, the first one takes effect.
- When an error occurs in one hint of a hints clause, the valid hints before the error still take effect, and the current and subsequent hints are ignored.
- hint_param_list contains the arguments of each hint, which vary from hint to hint.
The list of currently supported Hints is as follows:
| **Hint** | **Params** | **Comment** | **Scope** |
| :-----------: | -------------- | -------------------------- | -------------------------- |
| BATCH_SCAN | None | Batch table scan | JOIN statement for super tables |
| NO_BATCH_SCAN | None | Sequential table scan | JOIN statement for super tables |
For example:
```sql
SELECT /*+ BATCH_SCAN() */ a.ts FROM stable1 a, stable2 b where a.tag0 = b.tag0 and a.ts = b.ts;
```
## Lists
A query can be performed on some or all columns. Data and tag columns can all be included in the SELECT list.
......@@ -197,6 +225,14 @@ The \_IROWTS pseudocolumn can only be used with INTERP function. This pseudocolu
select _irowts, interp(current) from meters range('2020-01-01 10:00:00', '2020-01-01 10:30:00') every(1s) fill(linear);
```
### TAGS Query
When only tag columns are specified in the SELECT list, the TAGS keyword causes the query to return only tag columns. One row of tag values is returned for each child table.
```sql
SELECT TAGS tag_name [, tag_name ...] FROM stb_name
```
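A client can issue a TAGS query like any other statement. The following C sketch is illustrative only: it assumes a `meters` super table with `location` and `groupid` (INT) tags, as in the smart-meters example referenced earlier, and an already-opened connection `conn`.

```c
#include <stdio.h>
#include "taos.h"

// Print one row of tag values per child table of the `meters` super table.
static void list_child_table_tags(TAOS *conn) {
  TAOS_RES *res = taos_query(conn, "SELECT TAGS location, groupid FROM meters");
  if (taos_errno(res) != 0) {
    fprintf(stderr, "query failed: %s\n", taos_errstr(res));
    taos_free_result(res);
    return;
  }

  TAOS_ROW row;
  while ((row = taos_fetch_row(res)) != NULL) {
    int *lengths = taos_fetch_lengths(res);  // byte lengths of the current row's columns
    if (row[0] != NULL && row[1] != NULL) {
      // location is a variable-length string (not null-terminated); groupid is an INT tag.
      printf("location=%.*s groupid=%d\n", lengths[0], (char *)row[0], *(int *)row[1]);
    }
  }
  taos_free_result(res);
}
```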
## Query Objects
`FROM` can be followed by a number of tables or super tables, or can be followed by a sub-query.
......
......@@ -49,3 +49,5 @@ You can also add filter conditions to limit the results.
6. You can't create an index on a normal table or a child table.
7. If a tag column has too few unique values, it is better not to create an index on it, as the benefit would be very small.
8. A newly created super table automatically generates a randomly named index for the first tag column; the index name is composed of the first tag column's name plus 23 random bytes, and the index can be rebuilt or dropped.
......@@ -402,7 +402,7 @@ CAST(expr AS type_name)
**Return value type**: The type specified by parameter `type_name`
**Applicable data types**: All data types except JSON
**Applicable data types**: All data types except JSON and VARBINARY. If type_name is VARBINARY, expr can only be VARCHAR.
**Nested query**: It can be used in both the outer query and inner query in a nested query.
......@@ -698,7 +698,7 @@ ELAPSED(ts_primary_key [, time_unit])
LEASTSQUARES(expr, start_val, step_val)
```
**Description**: The linear regression function of the specified column and the timestamp column (primary key), `start_val` is the initial value and `step_val` is the step value.
**Description**: The linear regression function of a specified column, `start_val` is the initial value and `step_val` is the step value.
**Return value type**: A string in the format of "(slope, intercept)"
......
......@@ -168,3 +168,11 @@ All [scalar functions](../function/#scalar-functions) are available in stream pr
- [unique](../function/#unique)
- [mode](../function/#mode)
## Pause and Resume a Stream
1. Pause a stream
PAUSE STREAM [IF EXISTS] stream_name;
If "IF EXISTS" is not specified and the stream does not exist, an error is reported; if "IF EXISTS" is specified and the stream does not exist, success is returned; if the stream exists, all of its stream tasks are paused.
2. Resume a stream
RESUME STREAM [IF EXISTS] [IGNORE UNTREATED] stream_name;
If "IF EXISTS" is not specified and the stream does not exist, an error is reported; if "IF EXISTS" is specified and the stream does not exist, success is returned; if the stream exists, all of its stream tasks are resumed. If "IGNORE UNTREATED" is specified, data written while the stream was paused is ignored when the stream is resumed.
......@@ -19,6 +19,9 @@ index_option:
functions:
function [, function] ...
```
### Tag Indexing
See [tag index](../tag-index).
### SMA Indexing
......
......@@ -36,6 +36,7 @@ REST connection supports all platforms that can run Java.
| taos-jdbcdriver version | major changes | TDengine version |
| :---------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------: |
| 3.2.5 | Added committed() and assignment() methods for subscription | 3.1.0.3 or later |
| 3.2.4 | Added the enable.auto.commit parameter and the unsubscribe() method for subscription over WebSocket connections | - |
| 3.2.3 | Fixed resultSet data parsing failure in some cases | - |
| 3.2.2 | Subscription add seek function | 3.0.5.0 or later |
......@@ -1019,14 +1020,19 @@ while(true) {
#### Assignment subscription Offset
```java
// get topicPartition
Set<TopicPartition> assignment() throws SQLException;
// get offset
long position(TopicPartition partition) throws SQLException;
Map<TopicPartition, Long> position(String topic) throws SQLException;
Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;
Map<TopicPartition, OffsetAndMetadata> committed(Set<TopicPartition> partitions) throws SQLException;
// Overrides the fetch offsets that the consumer will use on the next poll(timeout).
void seek(TopicPartition partition, long offset) throws SQLException;
void seekToBeginning(Collection<TopicPartition> partitions) throws SQLException;
void seekToEnd(Collection<TopicPartition> partitions) throws SQLException;
```
Example usage is as follows.
......@@ -1052,6 +1058,18 @@ try (TaosConsumer<ResultBean> consumer = new TaosConsumer<>(properties)) {
}
```
#### Commit offset
If `enable.auto.commit` is false, offset can be submitted manually.
```java
void commitSync() throws SQLException;
void commitSync(Map<TopicPartition, OffsetAndMetadata> offsets) throws SQLException;
// async commit only support jni connection
void commitAsync(OffsetCommitCallback<V> callback) throws SQLException;
void commitAsync(Map<TopicPartition, OffsetAndMetadata> offsets, OffsetCommitCallback<V> callback) throws SQLException;
```
#### Close subscriptions
```java
......@@ -1174,7 +1192,6 @@ public abstract class ConsumerLoop {
config.setProperty("client.id", "1");
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
config.setProperty("value.deserializer.encoding", "UTF-8");
config.setProperty("experimental.snapshot.enable", "true");
this.consumer = new TaosConsumer<>(config);
this.topics = Collections.singletonList("topic_speed");
......
......@@ -13,7 +13,7 @@ After TDengine starts, it automatically writes many metrics in specific interval
To deploy TDinsight, we need
- a single-node TDengine server or a multi-node TDengine cluster and a [Grafana] server are required. This dashboard requires TDengine 3.0.1.0 and above, with the monitoring feature enabled. For detailed configuration, please refer to [TDengine monitoring configuration](../config/#monitoring-parameters).
- taosAdapter has been installed and running, please refer to [taosAdapter](../taosadapter).
- taosKeeper has been installed and running, please refer to [taosKeeper](../taosKeeper).
- taosKeeper has been installed and running. Please note that the monitoring-related items in the taos.cfg file need to be configured; refer to [taosKeeper](../taosKeeper) for details.
Please record
- The endpoint of taosAdapter REST service, for example `http://tdengine.local:6041`
......@@ -80,7 +80,7 @@ chmod +x TDinsight.sh
./TDinsight.sh
```
This script will automatically download the latest [Grafana TDengine data source plugin](https://github.com/taosdata/grafanaplugin/releases/latest) and [TDinsight dashboard](https://github.com/taosdata/grafanaplugin/blob/master/dashboards/TDinsightV3.json) with configurable parameters for command-line options to the [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) configuration file to automate deployment and updates, etc. With the alert setting options provided by this script, you can also get built-in support for AliCloud SMS alert notifications.
This script will automatically download the latest [Grafana TDengine data source plugin](https://github.com/taosdata/grafanaplugin/releases/latest) and [TDinsight dashboard](https://github.com/taosdata/grafanaplugin/blob/master/dashboards/TDinsightV3.json) with configurable parameters for command-line options to the [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) configuration file to automate deployment and updates, etc.
Assume you use TDengine and Grafana's default services on the same host. Run `./TDinsight.sh` and open Grafana in a browser window to see the TDinsight dashboard.
......@@ -112,9 +112,6 @@ Install and configure TDinsight dashboard in Grafana on Ubuntu 18.04/20.04 syste
-i, --tdinsight-uid <string> Replace with a non-space ASCII code as the dashboard id. [default: tdinsight]
-t, --tdinsight-title <string> Dashboard title. [default: TDinsight]
-e, --tdinsight-editable If the provisioning dashboard could be editable. [default: false]
-E, --external-notifier <string> Apply external notifier uid to TDinsight dashboard.
```
Most command-line options can take effect the same as environment variables.
......@@ -132,7 +129,10 @@ Most command-line options can take effect the same as environment variables.
| -i | --tdinsight-uid | TDINSIGHT_DASHBOARD_UID | TDinsight `uid` of the dashboard. [default: tdinsight] |
| -t | --tdinsight-title | TDINSIGHT_DASHBOARD_TITLE | TDinsight dashboard title. [Default: TDinsight] |
| -e | --tdinsight-editable | TDINSIGHT_DASHBOARD_EDITABLE | If the dashboard is configured to be editable. [Default: false] |
| -E | --external-notifier | EXTERNAL_NOTIFIER | Apply the external notifier uid to the TDinsight dashboard. |
:::note
The `-E` option is deprecated. We use Grafana unified alerting function instead.
:::
Suppose you start a TDengine database on host `tdengine` with HTTP API port `6041`, user `root1`, and password `pass5ord`. Execute the script.
......@@ -140,18 +140,6 @@ Suppose you start a TDengine database on host `tdengine` with HTTP API port `604
sudo ./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord
```
We provide a "-E" option to configure TDinsight to use the existing Notification Channel from the command line. Assuming your Grafana user and password is `admin:admin`, use the following command to get the `uid` of an existing notification channel.
```bash
curl --no-progress-meter -u admin:admin http://localhost:3000/api/alert-notifications | jq
```
Use the `uid` value obtained above as `-E` input.
```bash
./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord -E existing-notifier
```
If you want to monitor multiple TDengine clusters, you need to set up numerous TDinsight dashboards. Setting up non-default TDinsight requires some changes: the `-n` `-i` `-t` options need to be changed to non-default names, and `-N` and `-L` should also be changed if using the built-in SMS alerting feature.
```bash
......
......@@ -32,8 +32,10 @@ All data in tag_set is automatically converted to the NCHAR data type and does n
In the schemaless writing data line protocol, each data item in the field_set needs to be described with its data type. Let's explain in detail:
- If there are English double quotes on both sides, it indicates the VARCHAR(N) type. For example, `"abc"`.
- If there are double quotes on both sides and an L prefix, it means NCHAR(N) type. For example, `L"error message"`.
- If the value is enclosed in double quotes, it is of VARCHAR type. For example, `"abc"`.
- If the value is enclosed in double quotes and has an L/l prefix, it is of NCHAR type. For example, `L"error message"`.
- If the value is enclosed in double quotes and has a G/g prefix, it is of GEOMETRY type. For example, `G"Point(4.343 89.342)"`.
- If the value is enclosed in double quotes and has a B/b prefix, it is of VARBINARY type. The quoted content can be a hexadecimal string starting with \x or a plain string. For example, `B"\x98f46e"` and `B"hello"` (see the short C sketch at the end of this section).
- Spaces, equals sign (=), comma (,), double quote ("), and backslash (\\) need to be escaped with a backslash (\\) in front. (All refer to the ASCII character). The rules are as follows:
| **Serial number** | **Element** | **Escape characters** |
......@@ -110,7 +112,7 @@ You can configure smlChildTableName in taos.cfg to specify table names, for exam
Note: TDengine 3.0.3.0 and later automatically detect whether order is consistent. This parameter is no longer used.
9. Because SQL table names do not support the period (.), schemaless writing also handles periods: if a table name automatically created by schemaless writing contains a period (.), it is automatically replaced with an underscore (\_). If you manually specify a subtable name that contains a period (.), it is also converted to an underscore (\_).
10. taos.cfg supports the smlTsDefaultName configuration item (a string value), which takes effect only on the client side. It sets the name of the timestamp column of tables automatically created by schemaless writing; if it is not configured, the name defaults to _ts.
11. Super table names and child table names are case-sensitive.
:::tip
All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed 48 KB(64 KB since version 3.0.5.0) and the total length of a tag value cannot exceed 16 KB. See [TDengine SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
:::
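The prefix rules above can be exercised end to end through the C schemaless interface. This is a speculative sketch (not from this PR): the super table name `demo`, the field names, and the connection handle `conn` are illustrative, and it assumes the standard `taos_schemaless_insert` entry point with line protocol and nanosecond timestamps.

```c
#include <stdio.h>
#include "taos.h"

// Write one line-protocol row that uses the NCHAR (L"..."), GEOMETRY (G"...")
// and VARBINARY (B"...") prefixes described in this section.
static void sml_write_one_line(TAOS *conn) {
  char *lines[] = {
      "demo,groupid=2,location=California.SanFrancisco "
      "current=10.3,desc=L\"normal\",pos=G\"Point(1.0 1.0)\",raw=B\"\\x98f46e\" "
      "1626006833639000000"};

  TAOS_RES *res = taos_schemaless_insert(conn, lines, 1, TSDB_SML_LINE_PROTOCOL,
                                         TSDB_SML_TIMESTAMP_NANO_SECONDS);
  if (taos_errno(res) != 0) {
    fprintf(stderr, "schemaless insert failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);
}
```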
......
......@@ -218,11 +218,11 @@ The example to query the average system memory usage for the specified interval
### Importing the Dashboard
You can install TDinsight dashboard in data source configuration page (like `http://localhost:3000/datasources/edit/1/dashboards`) as a monitoring visualization tool for TDengine cluster. Ensure that you use TDinsight for 3.x.
You can install the TDinsight dashboard from the data source configuration page (for example, `http://localhost:3000/datasources/edit/1/dashboards`) as a monitoring visualization tool for a TDengine cluster. Make sure you use TDinsight for 3.x. Please note that TDinsight for 3.x requires taosKeeper to be configured and running correctly. Check the [TDinsight User Manual](/reference/tdinsight/) for details.
![TDengine Database Grafana plugine import dashboard](./import_dashboard.webp)
A dashboard for TDengine 2.x has been published on Grafana: [Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167). Check the [TDinsight User Manual](/reference/tdinsight/) for the details.
A dashboard for TDengine 2.x has been published on Grafana: [Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167).
For more dashboards using TDengine data source, [search here in Grafana](https://grafana.com/grafana/dashboards/?dataSource=tdengine-datasource). Here is a sub list:
......
---
sidebar_label: Seeq
title: Seeq
description: How to use Seeq and TDengine to perform time series data analysis
---
# How to use Seeq and TDengine to perform time series data analysis
## Introduction
Seeq is an advanced analytics software for the manufacturing industry and the Industrial Internet of Things (IIoT). Seeq supports the use of machine learning innovations within process manufacturing organizations. These capabilities enable organizations to deploy their own or third-party machine learning algorithms into advanced analytics applications used by frontline process engineers and subject matter experts, thus extending the efforts of a single data scientist to many frontline workers.
With the TDengine Java connector, Seeq effortlessly supports querying time series data provided by TDengine and offers functionalities such as data visualization, analysis, and forecasting.
### Install Seeq
Please download Seeq Server and Seeq Data Lab software installation package from the [Seeq official website](https://www.seeq.com/customer-download).
### Install and start Seeq Server
```
tar xvzf seeq-server-xxx.tar.gz
cd seeq-server-installer
sudo ./install
sudo seeq service enable
sudo seeq start
```
### Install and start Seeq Data Lab Server
Seeq Data Lab needs to be installed on a separate server from Seeq Server and connected to Seeq Server through configuration. For detailed installation and configuration instructions, please refer to [the official documentation](https://support.seeq.com/space/KB/1034059842).
```
tar xvf seeq-data-lab-<version>-64bit-linux.tar.gz
sudo seeq-data-lab-installer/install -f /opt/seeq/seeq-data-lab -g /var/opt/seeq -u seeq
sudo seeq config set Network/DataLab/Hostname localhost
sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab server (usually 34231)
sudo seeq config set Network/Hostname <value> # the host IP or URL of the main Seeq Server
# If the main Seeq server is configured to listen over HTTPS
sudo seeq config set Network/Webserver/SecurePort 443 # the secure port of the main Seeq Server (usually 443)
# If the main Seeq server is NOT configured to listen over HTTPS
sudo seeq config set Network/Webserver/Port <value>
#On the main Seeq server, open a Seeq Command Prompt and set the hostname of the Data Lab server:
sudo seeq config set Network/DataLab/Hostname <value> # the host IP (not URL) of the Data Lab server
sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab server (usually 34231)
```
### Install TDengine on-premise instance
See [Quick Install from Package](../../get-started).
### Or use TDengine Cloud
Register for a [TDengine Cloud](https://cloud.tdengine.com) account and log in to your account.
## Make Seeq be able to access TDengine
1. Get data location configuration
```
sudo seeq config get Folders/Data
```
2. Download TDengine Java connector from maven.org. Please use the latest version (Current is 3.2.5, https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.2.5/taos-jdbcdriver-3.2.5-dist.jar).
3. Restart Seeq server
```
sudo seeq restart
```
4. Input License
Use a browser to access ip:34216 and input the license according to the guide.
## How to use Seeq to analyze time-series data that TDengine serves
This chapter demonstrates how to use Seeq software in conjunction with TDengine for time series data analysis.
### Scenario Overview
The example scenario involves a power system where users collect electricity consumption data from metering devices at a power station on a daily basis. This data is stored in a TDengine cluster. The user now wants to predict how the electricity consumption will develop and purchase additional equipment to support it. The electricity consumption varies with monthly orders, and seasonal variations also affect the power consumption. Since the city is located in the Northern Hemisphere, more electricity is consumed during the summer. We will use simulated data to reflect these assumptions.
### Schema
```
CREATE STABLE meters (ts TIMESTAMP, num INT, temperature FLOAT, goods INT) TAGS (device NCHAR(20));
CREATE TABLE goods (ts1 TIMESTAMP, ts2 TIMESTAMP, goods FLOAT);
```
![Seeq demo schema](./seeq/seeq-demo-schema.webp)
### Mock data
```
python mockdata.py
taos -s "insert into power.goods select _wstart, _wstart + 10d, avg(goods) from power.meters interval(10d);"
```
The source code is hosted at [GitHub Repository](https://github.com/sangshuduo/td-forecasting).
### Using Seeq for data analysis
#### Data Source configuration
Please log in as a Seeq administrator and create the following data sources.
- Power
```
{
"QueryDefinitions": [
{
"Name": "PowerNum",
"Type": "SIGNAL",
"Sql": "SELECT ts, num FROM meters",
"Enabled": true,
"TestMode": false,
"TestQueriesDuringSync": true,
"InProgressCapsulesEnabled": false,
"Variables": null,
"Properties": [
{
"Name": "Name",
"Value": "Num",
"Sql": null,
"Uom": "string"
},
{
"Name": "Interpolation Method",
"Value": "linear",
"Sql": null,
"Uom": "string"
},
{
"Name": "Maximum Interpolation",
"Value": "2day",
"Sql": null,
"Uom": "string"
}
],
"CapsuleProperties": null
}
],
"Type": "GENERIC",
"Hostname": null,
"Port": 0,
"DatabaseName": null,
"Username": "root",
"Password": "taosdata",
"InitialSql": null,
"TimeZone": null,
"PrintRows": false,
"UseWindowsAuth": false,
"SqlFetchBatchSize": 100000,
"UseSSL": false,
"JdbcProperties": null,
"GenericDatabaseConfig": {
"DatabaseJdbcUrl": "jdbc:TAOS-RS://127.0.0.1:6041/power?user=root&password=taosdata",
"SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
"ResolutionInNanoseconds": 1000,
"ZonedColumnTypes": []
}
}
```
- Goods
```
{
"QueryDefinitions": [
{
"Name": "PowerGoods",
"Type": "CONDITION",
"Sql": "SELECT ts1, ts2, goods FROM power.goods",
"Enabled": true,
"TestMode": false,
"TestQueriesDuringSync": true,
"InProgressCapsulesEnabled": false,
"Variables": null,
"Properties": [
{
"Name": "Name",
"Value": "Goods",
"Sql": null,
"Uom": "string"
},
{
"Name": "Maximum Duration",
"Value": "10days",
"Sql": null,
"Uom": "string"
}
],
"CapsuleProperties": [
{
"Name": "goods",
"Value": "${columnResult}",
"Column": "goods",
"Uom": "string"
}
]
}
],
"Type": "GENERIC",
"Hostname": null,
"Port": 0,
"DatabaseName": null,
"Username": "root",
"Password": "taosdata",
"InitialSql": null,
"TimeZone": null,
"PrintRows": false,
"UseWindowsAuth": false,
"SqlFetchBatchSize": 100000,
"UseSSL": false,
"JdbcProperties": null,
"GenericDatabaseConfig": {
"DatabaseJdbcUrl": "jdbc:TAOS-RS://127.0.0.1:6041/power?user=root&password=taosdata",
"SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
"ResolutionInNanoseconds": 1000,
"ZonedColumnTypes": []
}
}
```
- Temperature
```
{
"QueryDefinitions": [
{
"Name": "PowerNum",
"Type": "SIGNAL",
"Sql": "SELECT ts, temperature FROM meters",
"Enabled": true,
"TestMode": false,
"TestQueriesDuringSync": true,
"InProgressCapsulesEnabled": false,
"Variables": null,
"Properties": [
{
"Name": "Name",
"Value": "Temperature",
"Sql": null,
"Uom": "string"
},
{
"Name": "Interpolation Method",
"Value": "linear",
"Sql": null,
"Uom": "string"
},
{
"Name": "Maximum Interpolation",
"Value": "2day",
"Sql": null,
"Uom": "string"
}
],
"CapsuleProperties": null
}
],
"Type": "GENERIC",
"Hostname": null,
"Port": 0,
"DatabaseName": null,
"Username": "root",
"Password": "taosdata",
"InitialSql": null,
"TimeZone": null,
"PrintRows": false,
"UseWindowsAuth": false,
"SqlFetchBatchSize": 100000,
"UseSSL": false,
"JdbcProperties": null,
"GenericDatabaseConfig": {
"DatabaseJdbcUrl": "jdbc:TAOS-RS://127.0.0.1:6041/power?user=root&password=taosdata",
"SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
"ResolutionInNanoseconds": 1000,
"ZonedColumnTypes": []
}
}
```
#### Launch Seeq Workbench
Please log in to the Seeq server at IP:port and create a new Seeq Workbench, then select data sources and the appropriate tools for data visualization and analysis. Please refer to [the official documentation](https://support.seeq.com/space/KB/146440193/Seeq+Workbench) for the details.
![Seeq Workbench](./seeq/seeq-demo-workbench.webp)
#### Use Seeq Data Lab Server for advanced data analysis
Please log in to the Seeq service at IP:port and create a new Seeq Data Lab. You can then use advanced tools, including the Python environment and machine learning add-ons, for more complex analysis.
```Python
from seeq import spy
spy.options.compatibility = 189
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import mlforecast
import lightgbm as lgb
from mlforecast.target_transforms import Differences
from sklearn.linear_model import LinearRegression
ds = spy.search({'ID': "8C91A9C7-B6C2-4E18-AAAF-XXXXXXXXX"})
print(ds)
sig = ds.loc[ds['Name'].isin(['Num'])]
print(sig)
data = spy.pull(sig, start='2015-01-01', end='2022-12-31', grid=None)
print("data.info()")
data.info()
print(data)
#data.plot()
print("data[Num].info()")
data['Num'].info()
da = data['Num'].index.tolist()
#print(da)
li = data['Num'].tolist()
#print(li)
data2 = pd.DataFrame()
data2['ds'] = da
print('1st data2 ds info()')
data2['ds'].info()
#data2['ds'] = pd.to_datetime(data2['ds']).to_timestamp()
data2['ds'] = pd.to_datetime(data2['ds']).astype('int64')
data2['y'] = li
print('2nd data2 ds info()')
data2['ds'].info()
print(data2)
data2.insert(0, column = "unique_id", value="unique_id")
print("Forecasting ...")
forecast = mlforecast.MLForecast(
models = lgb.LGBMRegressor(),
freq = 1,
lags=[365],
target_transforms=[Differences([365])],
)
forecast.fit(data2)
predicts = forecast.predict(365)
pd.concat([data2, predicts]).set_index("ds").plot(title = "current data with forecast")
plt.show()
```
Example output:
![Seeq forecast result](./seeq/seeq-forecast-result.webp)
### How to configure Seeq data source to access TDengine Cloud
Configuring a Seeq data source connection to TDengine Cloud or a local installation instance does not have any essential differences. After logging in to TDengine Cloud, select "Programming - Java" and copy the JDBC URL string with the token provided. Then, use this JDBC URL string to fill in the DatabaseJdbcUrl value in the Seeq Data Source configuration.
Please note that when using TDengine Cloud, you need to specify the database name in your SQL commands.
#### The data source of TDengine Cloud example
```
{
"QueryDefinitions": [
{
"Name": "CloudVoltage",
"Type": "SIGNAL",
"Sql": "SELECT ts, voltage FROM test.meters",
"Enabled": true,
"TestMode": false,
"TestQueriesDuringSync": true,
"InProgressCapsulesEnabled": false,
"Variables": null,
"Properties": [
{
"Name": "Name",
"Value": "Voltage",
"Sql": null,
"Uom": "string"
},
{
"Name": "Interpolation Method",
"Value": "linear",
"Sql": null,
"Uom": "string"
},
{
"Name": "Maximum Interpolation",
"Value": "2day",
"Sql": null,
"Uom": "string"
}
],
"CapsuleProperties": null
}
],
"Type": "GENERIC",
"Hostname": null,
"Port": 0,
"DatabaseName": null,
"Username": "root",
"Password": "taosdata",
"InitialSql": null,
"TimeZone": null,
"PrintRows": false,
"UseWindowsAuth": false,
"SqlFetchBatchSize": 100000,
"UseSSL": false,
"JdbcProperties": null,
"GenericDatabaseConfig": {
"DatabaseJdbcUrl": "jdbc:TAOS-RS://gw.cloud.taosdata.com?useSSL=true&token=41ac9d61d641b6b334e8b76f45f5a8XXXXXXXXXX",
"SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
"ResolutionInNanoseconds": 1000,
"ZonedColumnTypes": []
}
}
```
#### Seeq Workbench with TDengine Cloud data source example
![Seeq workbench with TDengine Cloud](./seeq/seeq-workbench-with-tdengine-cloud.webp)
## Conclusion
By integrating Seeq and TDengine, users can leverage the efficient storage and querying performance of TDengine while also benefiting from Seeq's powerful data visualization and analysis capabilities.
This integration allows users to take advantage of TDengine's high-performance time-series data storage and retrieval, ensuring efficient handling of large volumes of data. At the same time, Seeq provides advanced analytics features such as data visualization, anomaly detection, correlation analysis, and predictive modeling, enabling users to gain valuable insights and make data-driven decisions.
Together, Seeq and TDengine provide a comprehensive solution for time series data analysis in diverse industries such as manufacturing, IIoT, and power systems. The combination of efficient data storage and advanced analytics empowers users to unlock the full potential of their time series data, driving operational improvements, and enabling predictive and prescriptive analytics applications.
......@@ -78,6 +78,7 @@ int printRow(char *str, TAOS_ROW row, TAOS_FIELD *fields, int numFields) {
} break;
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_VARBINARY:
case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_GEOMETRY: {
int32_t charLen = varDataLen((char *)row[i] - VARSTR_HEADER_SIZE);
......
......@@ -76,6 +76,7 @@ int printRow(char *str, TAOS_ROW row, TAOS_FIELD *fields, int numFields) {
} break;
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_VARBINARY:
case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_GEOMETRY: {
int32_t charLen = varDataLen((char *)row[i] - VARSTR_HEADER_SIZE);
......
......@@ -10,7 +10,7 @@ TDengine takes full advantage of the characteristics of time-series data and proposes the concept of "one data collection
If you are a developer, be sure to read the [Developer Guide](./develop) chapter carefully. It covers database connections, data modeling, data ingestion, queries, stream processing, caching, data subscription, user-defined functions, and other features in detail, with sample code in various programming languages. In most cases, you can simply copy and paste the sample code, adapt it slightly for your application, and it will run.
We already live in the era of big data, and vertical scaling can no longer meet ever-growing business demands; every system must be able to scale horizontally, which makes clustering an indispensable feature of big data and database systems. The TDengine team has not only implemented clustering, but also open-sourced this important core capability. For how to deploy, manage, and maintain a TDengine cluster, please refer to the [Deploy a Cluster](./deployment) chapter.
We already live in the era of big data, and vertical scaling can no longer meet ever-growing business demands; every system must be able to scale horizontally, which makes clustering an indispensable feature of big data and database systems. The TDengine team has not only implemented clustering, but also open-sourced this important core capability. For how to deploy, manage, and maintain a TDengine cluster, please refer to the [Deploy a Cluster] chapter.
TDengine uses SQL as its query language, which greatly reduces learning and migration costs, while also extending SQL for time-series scenarios to support operations such as interpolation, downsampling, and time-weighted averages. The [SQL Manual](./taos-sql) chapter describes the SQL syntax in detail and lists all supported commands and functions.
......@@ -18,8 +18,6 @@ TDengine uses SQL as its query language, which greatly reduces learning and migration costs
If you want to learn more about TDengine's peripheral tools, REST API, and connectors for various programming languages, see the [Reference Guide](./reference) chapter.
If you are interested in TDengine's internal architecture, please read the [Inside TDengine](./tdinternal) chapter, which describes in detail the cluster design and the processes of data partitioning, sharding, writing, reading, querying, and aggregation. If you want to study the TDengine code or even contribute code, be sure to read this chapter thoroughly.
Finally, as an open-source project, TDengine welcomes everyone's participation. If you find any errors or unclear descriptions in the documentation, click "Edit this document" at the bottom of each page to fix them directly.
Together, we make a difference!
......@@ -6,7 +6,14 @@ toc_max_heading_level: 2
TDengine is an open-source, high-performance, cloud-native [time-series database](https://tdengine.com/tsdb/) optimized for IoT, connected vehicles, industrial IoT, finance, IT operations and maintenance, and similar scenarios. The TDengine code, including the clustering feature, is open source under GNU AGPL v3.0. Besides the core time-series database functionality, TDengine also provides [caching](../develop/cache/), [data subscription](../develop/tmq), [stream processing](../develop/stream), and other features to reduce system complexity and the cost of development and operations.
This chapter introduces TDengine's major features, competitive advantages, typical use cases, and benchmark comparisons with other databases, to give you an overall picture of TDengine.
This chapter introduces TDengine's major products and features, competitive advantages, typical use cases, and benchmark comparisons with other databases, to give you an overall picture of TDengine.
## Major Products
TDengine has three major products: TDengine Pro (i.e. TDengine Enterprise), TDengine Cloud, and TDengine OSS. For their precise definitions, please refer to:
- [TDengine Enterprise](https://www.taosdata.com/tdengine-pro)
- [TDengine Cloud](https://cloud.taosdata.com/?utm_source=menu&utm_medium=webcn)
- [TDengine OSS](https://www.taosdata.com/tdengine-oss)
## Major Features
......
......@@ -26,24 +26,19 @@ import CDemo from "./_sub_c.mdx";
This document does not cover the basics of message queues themselves; if you need that background, please look it up yourself.
Notes (using the C interface as an example):
- A consumer group consumes all data under the same topic, and different consumer groups are independent of each other;
- A consumer group consumes all vgroups of the same topic. The group can be composed of multiple consumers, but a vgroup is consumed by only one consumer; if the number of consumers exceeds the number of vgroups, the excess consumers do not consume data;
- On the server side, only one offset is saved for each vgroup. The offsets of each vgroup are monotonically increasing but not necessarily continuous, and there is no correlation between the offsets of different vgroups;
- Each poll returns one result block from the server. The block belongs to one vgroup and may contain data from multiple WAL versions; the offset of the first record in the block can be obtained through tmq_get_vgroup_offset;
- If a consumer group has never committed an offset, its member consumers start consuming from the value of the parameter auto.offset.reset when they restart and pull data again. Within a consumer's lifecycle, the client locally records the offset of the most recently pulled data and does not pull duplicate data;
- If a consumer terminates abnormally (without calling tmq_close), its consumer group rebalance is triggered after about 12 seconds; the consumer's status on the server changes to LOST, and after about 1 day the consumer is automatically deleted. If a consumer exits normally, it is deleted after exiting. When a new consumer is added, rebalance is triggered after about 2 seconds and the consumer's status on the server changes to ready;
- A consumer group rebalance reassigns vgroups to all consumer members of the group that are in the ready state; consumers can only perform assignment/seek/commit/poll operations on the vgroups they are responsible for;
- Consumers can call tmq_position to obtain the offset of the current consumption, seek to a specified offset, and consume again;
- seek points the position to the specified offset without performing a commit; once the seek succeeds, poll fetches data from the specified offset onward;
- Before the seek operation, tmq_get_topic_assignment must be called to obtain the consumer's vgroup IDs and offset ranges. The seek operation checks whether the vgroup ID and offset are valid and reports an error if they are not;
- position returns the current consumption position, which is the position to be fetched next, not the position already consumed;
- commit submits the consumption position. Without arguments, it commits the current consumption position (the position to be fetched next, not the one already consumed); with arguments, it commits the position given in the arguments (i.e. the position to resume from after the next exit and restart);
- seek sets the consumer's consumption position; wherever you seek, that is what position returns, and both refer to the position to be fetched next;
- seek does not affect commit and commit does not affect seek; they are independent of each other and are two different concepts;
- The begin field is the offset of the first record in the WAL, and the end field is the offset of the last record in the WAL + 1;
- The tmq_get_vgroup_offset interface obtains the offset of the first record in the result block that contains the record; when seeking to this offset, all data in the block is consumed. See the fourth point;
- Due to the WAL expiration and deletion mechanism, even if a seek succeeds, the offset may already have expired when data is polled. If the poll offset is smaller than the minimum WAL version number, consumption starts from the minimum WAL version number;
- Data subscription consumes data from the WAL. If some WAL files are deleted according to the WAL retention policy, the deleted data can no longer be consumed. You need to set a reasonable value for `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` when creating the database and make sure your application consumes the data in a timely manner so that no data is lost. This behavior is similar to Kafka and other widely used message queue products;
1. A consumer group consumes all data under the same topic, and different consumer groups are independent of each other;
2. A consumer group consumes all vgroups of the same topic. The group can be composed of multiple consumers, but a vgroup is consumed by only one consumer; if the number of consumers exceeds the number of vgroups, the excess consumers do not consume data;
3. On the server side, only one offset is saved for each vgroup. The offsets of each vgroup are monotonically increasing but not necessarily continuous, and there is no correlation between the offsets of different vgroups;
4. Each poll returns one result block from the server. The block belongs to one vgroup and may contain data from multiple WAL versions; the offset of the first record in the block can be obtained through tmq_get_vgroup_offset;
5. If a consumer group has never committed an offset, its member consumers start consuming from the value of the parameter auto.offset.reset when they restart and pull data again. Within a consumer's lifecycle, the client locally records the offset of the most recently pulled data and does not pull duplicate data;
6. If a consumer terminates abnormally (without calling tmq_close), its consumer group rebalance is triggered after about 12 seconds; the consumer's status on the server changes to LOST, and after about 1 day the consumer is automatically deleted. If a consumer exits normally, it is deleted after exiting. When a new consumer is added, rebalance is triggered after about 2 seconds and the consumer's status on the server changes to ready;
7. A consumer group rebalance reassigns vgroups to all consumer members of the group that are in the ready state; consumers can only perform assignment/seek/commit/poll operations on the vgroups they are responsible for;
8. Consumers can call tmq_position to obtain the offset of the current consumption, seek to a specified offset, and consume again;
9. seek points the position to the specified offset without performing a commit; once the seek succeeds, poll fetches data from the specified offset onward;
10. Before the seek operation, tmq_get_topic_assignment must be called to obtain the consumer's vgroup IDs and offset ranges. The seek operation checks whether the vgroup ID and offset are valid and reports an error if they are not;
11. The tmq_get_vgroup_offset interface obtains the offset of the first record in the result block that contains the record; when seeking to this offset, all data in the block is consumed. See the fourth point;
12. Due to the WAL expiration and deletion mechanism, even if a seek succeeds, the offset may already have expired when data is polled. If the poll offset is smaller than the minimum WAL version number, consumption starts from the minimum WAL version number;
13. Data subscription consumes data from the WAL. If some WAL files are deleted according to the WAL retention policy, the deleted data can no longer be consumed. You need to set a reasonable value for `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` when creating the database and make sure your application consumes the data in a timely manner so that no data is lost. This behavior is similar to Kafka and other widely used message queue products.
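To make the offset-related points above concrete, here is a minimal C sketch (an illustration, not part of this PR) that polls once, reads the block's vgroup and starting offset, commits the message synchronously, and reads back the committed offset. The consumer handle `tmq` and the topic name are placeholder assumptions; all functions come from the API listing below.

```c
#include <inttypes.h>
#include <stdio.h>
#include "taos.h"

// Poll one result block, process it, then commit it and check the committed offset.
static void consume_and_commit_once(tmq_t *tmq, const char *topic) {
  TAOS_RES *res = tmq_consumer_poll(tmq, 1000);  // wait up to 1000 ms
  if (res == NULL) return;                       // nothing to consume this round

  int32_t vgId   = tmq_get_vgroup_id(res);
  int64_t offset = tmq_get_vgroup_offset(res);   // offset of the first record in this block
  printf("block from vgId %d starts at offset %" PRId64 "\n", (int)vgId, offset);

  /* ... iterate over the rows in `res` here ... */

  // Synchronous commit of this message (i.e. its offset + 1); the committed
  // value can then be read back from the server for the same vgroup.
  int32_t code = tmq_commit_sync(tmq, res);
  if (code != 0) {
    fprintf(stderr, "commit failed: %s\n", tmq_err2str(code));
  } else {
    printf("vgId %d committed offset: %" PRId64 "\n", (int)vgId,
           tmq_committed(tmq, topic, vgId));
  }
  taos_free_result(res);
}
```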
## Main Data Structures and APIs
......@@ -60,17 +55,17 @@ import CDemo from "./_sub_c.mdx";
typedef void(tmq_commit_cb(tmq_t *tmq, int32_t code, void *param));
typedef enum tmq_conf_res_t {
TMQ_CONF_UNKNOWN = -2,
TMQ_CONF_INVALID = -1,
TMQ_CONF_OK = 0,
} tmq_conf_res_t;
TMQ_CONF_UNKNOWN = -2,
TMQ_CONF_INVALID = -1,
TMQ_CONF_OK = 0,
} tmq_conf_res_t;
typedef struct tmq_topic_assignment {
int32_t vgId;
int64_t currentOffset;
int64_t begin;
int64_t end; // The last version of wal + 1
} tmq_topic_assignment;
int32_t vgId;
int64_t currentOffset;
int64_t begin;
int64_t end;
} tmq_topic_assignment;
DLL_EXPORT tmq_conf_t *tmq_conf_new();
DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value);
......@@ -89,24 +84,24 @@ import CDemo from "./_sub_c.mdx";
DLL_EXPORT int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics);
DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout);
DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq);
DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg); //Commit the msg’s offset + 1
DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg);
DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param);
DLL_EXPORT int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset);
DLL_EXPORT void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, void *param);
DLL_EXPORT int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment,int32_t *numOfAssignment);
DLL_EXPORT void tmq_free_assignment(tmq_topic_assignment* pAssignment);
DLL_EXPORT int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset);
DLL_EXPORT int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId); // The current offset is the offset of the last consumed message + 1
DLL_EXPORT int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId);
DLL_EXPORT int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId);
DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res);
DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res);
DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res);
DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res); // Get current offset of the result
DLL_EXPORT const char *tmq_err2str(int32_t code);
DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res);
DLL_EXPORT const char *tmq_err2str(int32_t code);
DLL_EXPORT void        tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param);
```
这些 API 的文档请见 [C/C++ Connector](../../connector/cpp),下面介绍一下它们的具体用法(超级表和子表结构请参考“数据建模”一节),完整的示例代码请见下面 C 语言的示例代码。
下面介绍一下它们的具体用法(超级表和子表结构请参考“数据建模”一节),完整的示例代码请见下面 C 语言的示例代码。
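在查看完整示例之前,可以先通过下面这段简化的 C 语言示意代码了解典型的订阅-消费-提交流程(仅为示意,假设名为 topic_meters 的 topic 已创建且持续有数据写入,连接参数与错误处理均从简):

```c
#include <stdio.h>
#include "taos.h"

// 简化的消费流程示意:创建消费者 -> 订阅 -> 循环 poll 并手动提交 -> 关闭
int basic_consume_demo(void) {
  tmq_conf_t *conf = tmq_conf_new();
  tmq_conf_set(conf, "group.id", "demo_group");
  tmq_conf_set(conf, "td.connect.user", "root");
  tmq_conf_set(conf, "td.connect.pass", "taosdata");
  tmq_conf_set(conf, "auto.offset.reset", "earliest");
  tmq_conf_set(conf, "enable.auto.commit", "false");

  tmq_t *tmq = tmq_consumer_new(conf, NULL, 0);
  tmq_conf_destroy(conf);

  tmq_list_t *topics = tmq_list_new();
  tmq_list_append(topics, "topic_meters");  // 假设该 topic 已通过 CREATE TOPIC 创建
  tmq_consumer_subscribe(tmq, topics);
  tmq_list_destroy(topics);

  while (1) {
    TAOS_RES *msg = tmq_consumer_poll(tmq, 1000);  // 超时 1000ms
    if (msg == NULL) break;                        // 示意:无数据即退出循环
    printf("topic:%s, vgroup:%d\n", tmq_get_topic_name(msg), tmq_get_vgroup_id(msg));
    tmq_commit_sync(tmq, msg);                     // 同步提交该消息的 offset + 1
    taos_free_result(msg);
  }

  tmq_unsubscribe(tmq);
  tmq_consumer_close(tmq);
  return 0;
}
```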
</TabItem>
<TabItem value="java" label="Java">
......@@ -120,7 +115,19 @@ Set<String> subscription() throws SQLException;
ConsumerRecords<V> poll(Duration timeout) throws SQLException;
Set<TopicPartition> assignment() throws SQLException;
long position(TopicPartition partition) throws SQLException;
Map<TopicPartition, Long> position(String topic) throws SQLException;
Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;
Map<TopicPartition, OffsetAndMetadata> committed(Set<TopicPartition> partitions) throws SQLException;
void seek(TopicPartition partition, long offset) throws SQLException;
void seekToBeginning(Collection<TopicPartition> partitions) throws SQLException;
void seekToEnd(Collection<TopicPartition> partitions) throws SQLException;
void commitSync() throws SQLException;
void commitSync(Map<TopicPartition, OffsetAndMetadata> offsets) throws SQLException;
void close() throws SQLException;
```
......
......@@ -296,7 +296,7 @@ ldconfig
3. 如果 Python UDF 程序执行时,通过 PYTHONPATH 引用其它的包,可以设置 taos.cfg 的 UdfdLdLibPath 变量为PYTHONPATH的内容
4. 启动 `taosd` 服务
细节请参考 [快速开始](../../get-started)
细节请参考 [立即开始](../../get-started)
### 接口定义
......@@ -883,5 +883,5 @@ pycumsum 使用 numpy 计算输入列所有数据的累积和。
</details>
## 管理和使用 UDF
在使用 UDF 之前需要先将其加入到 TDengine 系统中。关于如何管理和使用 UDF,请参考[管理和使用 UDF](../12-taos-sql/26-udf.md)
在使用 UDF 之前需要先将其加入到 TDengine 系统中。关于如何管理和使用 UDF,请参考[管理和使用 UDF](../../taos-sql/udf)
......@@ -36,6 +36,7 @@ REST 连接支持所有能运行 Java 的平台。
| taos-jdbcdriver 版本 | 主要变化 | TDengine 版本 |
| :------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------: |
| 3.2.5 | 数据订阅增加 committed()、assignment() 方法 | 3.1.0.3 及更高版本 |
| 3.2.4 | 数据订阅在 WebSocket 连接下增加 enable.auto.commit 参数,以及 unsubscribe() 方法。 | - |
| 3.2.3 | 修复 ResultSet 在一些情况数据解析失败 | - |
| 3.2.2 | 新增功能:数据订阅支持 seek 功能。 | 3.0.5.0 及更高版本 |
......@@ -1022,14 +1023,19 @@ while(true) {
#### 指定订阅 Offset
```java
// 获取订阅的 topicPartition
Set<TopicPartition> assignment() throws SQLException;
// 获取 offset
long position(TopicPartition partition) throws SQLException;
Map<TopicPartition, Long> position(String topic) throws SQLException;
Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;
Map<TopicPartition, OffsetAndMetadata> committed(Set<TopicPartition> partitions) throws SQLException;
// 指定下一次 poll 中使用的 offset
void seek(TopicPartition partition, long offset) throws SQLException;
void seekToBeginning(Collection<TopicPartition> partitions) throws SQLException;
void seekToEnd(Collection<TopicPartition> partitions) throws SQLException;
```
示例代码:
......@@ -1055,6 +1061,18 @@ try (TaosConsumer<ResultBean> consumer = new TaosConsumer<>(properties)) {
}
```
#### 提交 Offset
当`enable.auto.commit`为 false 时,可以手动提交 offset。
```java
void commitSync() throws SQLException;
void commitSync(Map<TopicPartition, OffsetAndMetadata> offsets) throws SQLException;
// 异步提交仅在 native 连接下有效
void commitAsync(OffsetCommitCallback<V> callback) throws SQLException;
void commitAsync(Map<TopicPartition, OffsetAndMetadata> offsets, OffsetCommitCallback<V> callback) throws SQLException;
```
#### 关闭订阅
```java
......
......@@ -143,6 +143,7 @@ phpize && ./configure --enable-swoole && make -j && make install
| `TDengine\TSDB_DATA_TYPE_FLOAT` | float |
| `TDengine\TSDB_DATA_TYPE_DOUBLE` | double |
| `TDengine\TSDB_DATA_TYPE_BINARY` | binary |
| `TDengine\TSDB_DATA_TYPE_VARBINARY` | varbinary |
| `TDengine\TSDB_DATA_TYPE_TIMESTAMP` | timestamp |
| `TDengine\TSDB_DATA_TYPE_NCHAR` | nchar |
| `TDengine\TSDB_DATA_TYPE_UTINYINT` | utinyint |
......
......@@ -4,8 +4,6 @@ import PkgListV3 from "/components/PkgListV3";
<PkgListV3 type={1} sys="Linux" />
[所有下载](../../releases/tdengine)
2. 解压缩软件包
将软件包放置在当前用户可读写的任意目录下,然后执行下面的命令:`tar -xzvf TDengine-client-VERSION.tar.gz`
......
......@@ -4,8 +4,6 @@ import PkgListV3 from "/components/PkgListV3";
<PkgListV3 type={8} sys="macOS" />
[所有下载](../../releases/tdengine)
2. 执行安装程序,按提示选择默认值,完成安装。如果安装被阻止,可以右键或者按 Ctrl 点击安装包,选择 `打开`。
3. 配置 taos.cfg
......
......@@ -3,9 +3,7 @@ import PkgListV3 from "/components/PkgListV3";
1. 下载客户端安装包
<PkgListV3 type={4} sys="Windows" />
[所有下载](../../releases/tdengine)
2. 执行安装程序,按提示选择默认值,完成安装
3. 安装路径
......
......@@ -62,12 +62,13 @@ serverPort 6030
加入到集群中的数据节点 dnode,下表中涉及集群相关的参数必须完全相同,否则不能成功加入到集群中。
| **#** | **配置参数名称** | **含义** |
| ----- | ---------------- | ------------------------------------ |
| 1 | statusInterval | dnode 向 mnode 报告状态时长 |
| 2 | timezone | 时区 |
| 3 | locale | 系统区位信息及编码格式 |
| 4 | charset | 字符集编码 |
| 5 | ttlChangeOnWrite | ttl 到期时间是否伴随表的修改操作改变 |
## 启动集群
......@@ -196,10 +197,10 @@ dnodeID 是集群自动分配的,不得人工指定。它在生成时是递增
1、建立集群时使用 CREATE DNODE 增加新节点后,新节点始终显示 offline 状态?
```sql
1)首先要检查增加的新节点上的 taosd 服务是否已经正常启动
2)如果已经启动,再检查到新节点的网络是否通畅,可以使用 ping fqdn 验证下
3)如果前面两步都没有问题,这一步要检查新节点是否已作为独立集群在运行。可以使用 taos -h fqdn 连接上后,执行 show dnodes; 命令查看。
如果显示的列表与你主节点上显示的不一致,说明此节点自己单独成立了一个集群,解决的方法是停止新节点上的服务,然后清空新节点上
taos.cfg 中配置的 dataDir 目录下的所有文件,重新启动新节点服务即可解决。
```
......@@ -42,11 +42,12 @@ CREATE DATABASE db_name PRECISION 'ns';
| 14 | NCHAR | 自定义 | 记录包含多字节字符在内的字符串,如中文字符。每个 NCHAR 字符占用 4 字节的存储空间。字符串两端使用单引号引用,字符串内的单引号需用转义字符 `\'`。NCHAR 使用时须指定字符串大小,类型为 NCHAR(10) 的列表示此列的字符串最多存储 10 个 NCHAR 字符。如果用户字符串长度超出声明长度,将会报错。 |
| 15 | JSON | | JSON 数据类型, 只有 Tag 可以是 JSON 格式 |
| 16 | VARCHAR | 自定义 | BINARY 类型的别名 |
| 17 | GEOMETRY | 自定义 | 几何类型 |
| 18 | VARBINARY | 自定义 | 可变长的二进制数据 |
:::note
- 表的每行长度不能超过 48KB(从 3.0.5.0 版本开始为 64KB)(注意:每个 BINARY/NCHAR/GEOMETRY/VARBINARY 类型的列还会额外占用 2 个字节的存储位置)。
- 虽然 BINARY 类型在底层存储上支持字节型的二进制字符,但不同编程语言对二进制数据的处理方式并不保证一致,因此建议在 BINARY 类型中只存储 ASCII 可见字符,而避免存储不可见字符。多字节的数据,例如中文字符,则需要使用 NCHAR 类型进行保存。如果强行使用 BINARY 类型保存中文字符,虽然有时也能正常读写,但并不带有字符集信息,很容易出现数据乱码甚至数据损坏等情况。
- BINARY 类型理论上最长可以有 16,374(从 3.0.5.0 版本开始,数据列为 65,517,标签列为 16,382) 字节。BINARY 仅支持字符串输入,字符串两端需使用单引号引用。使用时须指定大小,如 BINARY(20) 定义了最长为 20 个单字节字符的字符串,每个字符占 1 字节的存储空间,总共固定占用 20 字节的空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\'`
- GEOMETRY 类型数据列为最大长度为 65,517 字节,标签列最大长度为 16,382 字节。支持 2D 的 POINT、LINESTRING 和 POLYGON 子类型数据。长度计算方式如下表所示:
......@@ -58,6 +59,7 @@ CREATE DATABASE db_name PRECISION 'ns';
| 3 | POLYGON((1.0 1.0, 2.0 2.0, 1.0 1.0)) | 13+3*16 | 13+4094*16 | +16 |
- SQL 语句中的数值类型将依据是否存在小数点,或使用科学计数法表示,来判断数值类型是否为整型或者浮点型,因此在使用时要注意相应类型越界的情况。例如,9999999999999999999 会认为超过长整型的上边界而溢出,而 9999999999999999999.0 会被认为是有效的浮点数。
- VARBINARY 是一种存储二进制数据的数据类型,最大长度为 65,517 字节,标签列最大长度为 16,382 字节。可以通过 sql 或 schemaless 方式写入二进制数据(需要转换为以 \x 开头的字符串写入),也可以通过 stmt 方式写入(可以直接使用二进制)。查询显示时以 \x 开头的 16 进制形式展示。
:::
......
......@@ -9,7 +9,7 @@ description: 查询数据的详细语法
```sql
SELECT {DATABASE() | CLIENT_VERSION() | SERVER_VERSION() | SERVER_STATUS() | NOW() | TODAY() | TIMEZONE() | CURRENT_USER() | USER() }
SELECT [hints] [DISTINCT] [TAGS] select_list
from_clause
[WHERE condition]
[partition_by_clause]
......@@ -21,6 +21,11 @@ SELECT [DISTINCT] select_list
[LIMIT limit_val [OFFSET offset_val]]
[>> export_file]
hints: /*+ [hint([hint_param_list])] [hint([hint_param_list])] */
hint:
BATCH_SCAN | NO_BATCH_SCAN
select_list:
select_expr [, select_expr] ...
......@@ -70,6 +75,29 @@ order_expr:
{expr | position | c_alias} [DESC | ASC] [NULLS FIRST | NULLS LAST]
```
## Hints
Hints 是用户控制单个语句查询优化的一种手段,当 Hint 不适用于当前的查询语句时会被自动忽略,具体说明如下:
- Hints 语法以`/*+`开始,以`*/`结束,前后可有空格。
- Hints 语法只能跟随在 SELECT 关键字后。
- 每个 Hints 可以包含多个 Hint,Hint 间以空格分开,当多个 Hint 冲突或相同时以先出现的为准。
- 当 Hints 中某个 Hint 出现错误时,错误出现之前的有效 Hint 仍然有效,当前及之后的 Hint 被忽略。
- hint_param_list 是每个 Hint 的参数,根据每个 Hint 的不同而不同。
目前支持的 Hints 列表如下:
| **Hint** | **参数** | **说明** | **适用范围** |
| :-----------: | -------------- | -------------------------- | -------------------------- |
| BATCH_SCAN | 无 | 采用批量读表的方式 | 超级表 JOIN 语句 |
| NO_BATCH_SCAN | 无 | 采用顺序读表的方式 | 超级表 JOIN 语句 |
举例:
```sql
SELECT /*+ BATCH_SCAN() */ a.ts FROM stable1 a, stable2 b where a.tag0 = b.tag0 and a.ts = b.ts;
```
## 列表
查询语句可以指定部分或全部列作为返回结果。数据列和标签列都可以出现在列表中。
......@@ -132,6 +160,16 @@ SELECT DISTINCT col_name [, col_name ...] FROM tb_name;
:::
### 标签查询
当查询的列只有标签列时,`TAGS` 关键字可以指定返回所有子表的标签列。每个子表只返回一行标签列。
返回所有子表的标签列:
```sql
SELECT TAGS tag_name [, tag_name ...] FROM stb_name
```
### 结果集列名
`SELECT`子句中,如果不指定返回结果集合的列名,结果集列名称默认使用`SELECT`子句中的表达式名称作为列名称。此外,用户可使用`AS`来重命名返回结果集合中列的名称。例如:
......
......@@ -48,4 +48,6 @@ SELECT * FROM information_schema.INS_INDEXES
6. 不支持对普通和子表建立索引。
7. 如果某个 tag 列的唯一值较少时,不建议对其建立索引,这种情况下收效甚微。
8. 新建立的超级表,会给第一列 tag 随机生成一个 indexNewName,生成规则是:tag0 的 name + 23 个 byte。该索引可以在系统表中查到,也可以按需要 drop,行为和其他列 tag 的索引一样。
......@@ -402,7 +402,7 @@ CAST(expr AS type_name)
**返回结果类型**:CAST 中指定的类型(type_name)。
**适用数据类型**:输入参数 expression 的类型可以是除JSON外的所有类型。
**适用数据类型**:输入参数 expr 的类型可以是除JSON和VARBINARY外的所有类型。如果 type_name 为 VARBINARY,则 expr 只能是 VARCHAR 类型。
**嵌套子查询支持**:适用于内层查询和外层查询。
......@@ -700,7 +700,7 @@ ELAPSED(ts_primary_key [, time_unit])
LEASTSQUARES(expr, start_val, step_val)
```
**功能说明**:统计表中某列的值是主键(时间戳)的拟合直线方程。start_val 是自变量初始值,step_val 是自变量的步长值。
**功能说明**:统计表中某列的值的拟合直线方程。start_val 是自变量初始值,step_val 是自变量的步长值。
**返回数据类型**:字符串表达式(斜率, 截距)。
......
......@@ -201,7 +201,6 @@ TDengine 对于修改数据提供两种处理方式,由 IGNORE UPDATE 选项
对于已经存在的超级表,检查列的schema信息
1. 检查列的schema信息是否匹配,对于不匹配的,则自动进行类型转换,当前只有数据长度大于4096byte时才报错,其余场景都能进行类型转换。
2. 检查列的个数是否相同,如果不同,需要显式地指定超级表与subquery的列的对应关系,否则报错;如果相同,可以指定对应关系,也可以不指定,不指定则按位置顺序对应。
3. 至少自定义一个tag,否则报错。详见 自定义TAG
## 自定义TAG
......@@ -249,3 +248,12 @@ T = 最新事件时间 - DELETE_MARK
- [unique](../function/#unique)
- [mode](../function/#mode)
## 暂停、恢复流计算
1. 流计算暂停计算任务
PAUSE STREAM [IF EXISTS] stream_name;
没有指定 IF EXISTS,如果该 stream 不存在,则报错;如果存在,则暂停流计算。指定了 IF EXISTS,如果该 stream 不存在,则返回成功;如果存在,则暂停流计算。

2. 流计算恢复计算任务
RESUME STREAM [IF EXISTS] [IGNORE UNTREATED] stream_name;
没有指定 IF EXISTS,如果该 stream 不存在,则报错;如果存在,则恢复流计算。指定了 IF EXISTS,如果 stream 不存在,则返回成功;如果存在,则恢复流计算。如果指定 IGNORE UNTREATED,则恢复流计算时,忽略流计算暂停期间写入的数据。
---
sidebar_label: 权限管理
title: 权限管理
description: 企业版中才具有的权限管理功能
---
本节讲述如何在 TDengine 中进行权限管理的相关操作。权限管理是 TDengine 企业版的特有功能,本节只列举了一些基本的权限管理功能作为示例,更丰富的权限管理请联系 TDengine 销售或市场团队。
## 创建用户
```sql
CREATE USER user_name PASS 'password' [SYSINFO {1|0}];
```
创建用户。
user_name 最长为 23 字节。
password 最长为 31 字节,合法字符包括"a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/",不可以出现单双引号、撇号、反斜杠和空格,且不可以为空。
SYSINFO 表示用户是否可以查看系统信息。1 表示可以查看,0 表示不可以查看。系统信息包括服务端配置信息、服务端各种节点信息(如 DNODE、QNODE等)、存储相关的信息等。默认为可以查看系统信息。
例如,创建密码为123456且可以查看系统信息的用户test如下:
```sql
taos> create user test pass '123456' sysinfo 1;
Query OK, 0 of 0 rows affected (0.001254s)
```
## 查看用户
```sql
SHOW USERS;
```
查看用户信息。
```sql
taos> show users;
name | super | enable | sysinfo | create_time |
================================================================================
test | 0 | 1 | 1 | 2022-08-29 15:10:27.315 |
root | 1 | 1 | 1 | 2022-08-29 15:03:34.710 |
Query OK, 2 rows in database (0.001657s)
```
也可以通过查询INFORMATION_SCHEMA.INS_USERS系统表来查看用户信息,例如:
```sql
taos> select * from information_schema.ins_users;
name | super | enable | sysinfo | create_time |
================================================================================
test | 0 | 1 | 1 | 2022-08-29 15:10:27.315 |
root | 1 | 1 | 1 | 2022-08-29 15:03:34.710 |
Query OK, 2 rows in database (0.001953s)
```
## 删除用户
```sql
DROP USER user_name;
```
## 修改用户信息
```sql
ALTER USER user_name alter_user_clause
alter_user_clause: {
PASS 'literal'
| ENABLE value
| SYSINFO value
}
```
- PASS:修改用户密码。
- ENABLE:修改用户是否启用。1 表示启用此用户,0 表示禁用此用户。
- SYSINFO:修改用户是否可查看系统信息。1 表示可以查看系统信息,0 表示不可以查看系统信息。
例如,禁用 test 用户:
```sql
taos> alter user test enable 0;
Query OK, 0 of 0 rows affected (0.001160s)
```
## 授权
```sql
GRANT privileges ON priv_level TO user_name
privileges : {
ALL
| priv_type [, priv_type] ...
}
priv_type : {
READ
| WRITE
}
priv_level : {
dbname.*
| *.*
}
```
对用户授权。授权功能只包含在企业版中。
授权级别支持到DATABASE,权限有READ和WRITE两种。
TDengine 有超级用户和普通用户两类用户。超级用户缺省创建为root,拥有所有权限。使用超级用户创建出来的用户为普通用户。在未授权的情况下,普通用户可以创建DATABASE,并拥有自己创建的DATABASE的所有权限,包括删除数据库、修改数据库、查询时序数据和写入时序数据。超级用户可以给普通用户授予其他DATABASE的读写权限,使其可以在此DATABASE上读写数据,但不能对其进行删除和修改数据库的操作。
对于非DATABASE的对象,如USER、DNODE、UDF、QNODE等,普通用户只有读权限(一般为SHOW命令),不能创建和修改。
## 撤销授权
```sql
REVOKE privileges ON priv_level FROM user_name
privileges : {
ALL
| priv_type [, priv_type] ...
}
priv_type : {
READ
| WRITE
}
priv_level : {
dbname.*
| *.*
}
```
收回对用户的授权。授权功能只包含在企业版中。
......@@ -20,6 +20,9 @@ index_option:
functions:
function [, function] ...
```
### tag 索引
[tag 索引](../tag-index)
### SMA 索引
......
......@@ -180,7 +180,7 @@ AllowWebSockets
node_export 是一个机器指标的导出器。请访问 [https://github.com/prometheus/node_exporter](https://github.com/prometheus/node_exporter) 了解更多信息。
- 支持 Prometheus remote_read 和 remote_write
remote_read 和 remote_write 是 Prometheus 数据读写分离的集群方案。请访问[https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis](https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis) 了解更多信息。
- 获取 table 所在的虚拟节点组(VGroup)的 VGroup ID。关于虚拟节点组(VGroup)的更多信息,请访问[整体架构文档](/tdinternal/arch/#主要逻辑单元)
- 获取 table 所在的虚拟节点组(VGroup)的 VGroup ID。
## 接口
......@@ -245,7 +245,7 @@ Prometheus 使用的由 \*NIX 内核暴露的硬件和操作系统指标的输
### 获取 table 的 VGroup ID
可以访问 http 接口 `http://<fqdn>:6041/rest/vgid?db=<db>&table=<table>` 获取 table 的 VGroup ID。关于虚拟节点组(VGroup)的更多信息,请访问[整体架构文档](/tdinternal/arch/#主要逻辑单元)
可以访问 http 接口 `http://<fqdn>:6041/rest/vgid?db=<db>&table=<table>` 获取 table 的 VGroup ID。
## 内存使用优化方法
......
......@@ -11,11 +11,7 @@ taosBenchmark (曾用名 taosdemo ) 是一个用于测试 TDengine 产品性能
## 安装
taosBenchmark 有两种安装方式:
- 安装 TDengine 官方安装包的同时会自动安装 taosBenchmark, 详情请参考[ TDengine 安装](/operation/pkg-install)
- 单独编译 taos-tools 并安装, 详情请参考 [taos-tools](https://github.com/taosdata/taos-tools) 仓库。
- 安装 TDengine 官方安装包的同时会自动安装 taosBenchmark
## 运行
......
......@@ -15,7 +15,7 @@ TDengine 通过 [taosKeeper](../taosKeeper) 将服务器的 CPU、内存、硬
- 单节点的 TDengine 服务器或多节点的 [TDengine] 集群,以及一个[Grafana]服务器。此仪表盘需要 TDengine 3.0.0.0 及以上,并开启监控服务,具体配置请参考:[TDengine 监控配置](../config/#监控相关)。
- taosAdapter 已经安装并正常运行。具体细节请参考:[taosAdapter 使用手册](../taosadapter)
- taosKeeper 已安装并正常运行。注意需要 taos.cfg 文件中打开 monitor 相关配置项,具体细节请参考:[taosKeeper 使用手册](../taosKeeper)
记录以下信息:
......@@ -120,7 +120,7 @@ chmod +x TDinsight.sh
./TDinsight.sh
```
这个脚本会自动下载最新的[Grafana TDengine 数据源插件](https://github.com/taosdata/grafanaplugin/releases/latest) 和 [TDinsight 仪表盘](https://github.com/taosdata/grafanaplugin/blob/master/dashboards/TDinsightV3.json) ,将命令行选项中的可配置参数转为 [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) 配置文件,以进行自动化部署及更新等操作。利用该脚本提供的告警设置选项,你还可以获得内置的阿里云短信告警通知支持。
这个脚本会自动下载最新的[Grafana TDengine 数据源插件](https://github.com/taosdata/grafanaplugin/releases/latest) 和 [TDinsight 仪表盘](https://github.com/taosdata/grafanaplugin/blob/master/dashboards/TDinsightV3.json) ,将命令行选项中的可配置参数转为 [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) 配置文件,以进行自动化部署及更新等操作。
假设您在同一台主机上使用 TDengine 和 Grafana 的默认服务。运行 `./TDinsight.sh` 并打开 Grafana 浏览器窗口就可以看到 TDinsight 仪表盘了。
......@@ -152,9 +152,6 @@ Install and configure TDinsight dashboard in Grafana on Ubuntu 18.04/20.04 syste
-i, --tdinsight-uid <string> Replace with a non-space ASCII code as the dashboard id. [default: tdinsight]
-t, --tdinsight-title <string> Dashboard title. [default: TDinsight]
-e, --tdinsight-editable If the provisioning dashboard could be editable. [default: false]
-E, --external-notifier <string> Apply external notifier uid to TDinsight dashboard.
```
大多数命令行选项都可以通过环境变量获得同样的效果。
......@@ -172,7 +169,10 @@ Install and configure TDinsight dashboard in Grafana on Ubuntu 18.04/20.04 syste
| -i | --tdinsight-uid | TDINSIGHT_DASHBOARD_UID | TDinsight 仪表盘`uid`。 [默认值:tdinsight] |
| -t | --tdinsight-title | TDINSIGHT_DASHBOARD_TITLE | TDinsight 仪表盘标题。 [默认:TDinsight] |
| -e | --tdinsight-editable | TDINSIGHT_DASHBOARD_EDITABLE | 如果配置仪表盘可以编辑。 [默认值:false] |
| -E | --external-notifier | EXTERNAL_NOTIFIER | 将外部通知程序 uid 应用于 TDinsight 仪表盘。 |
:::note
新版本插件使用 Grafana unified alerting 功能,`-E` 选项不再支持。
:::
假设您在主机 `tdengine` 上启动 TDengine 数据库,HTTP API 端口为 `6041`,用户为 `root1`,密码为 `pass5ord`。执行脚本:
......@@ -180,18 +180,6 @@ Install and configure TDinsight dashboard in Grafana on Ubuntu 18.04/20.04 syste
./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord
```
我们提供了一个“-E”选项,用于从命令行配置 TDinsight 使用现有的通知通道(Notification Channel)。假设你的 Grafana 用户和密码是 `admin:admin`,使用以下命令获取已有的通知通道的`uid`:
```bash
curl --no-progress-meter -u admin:admin http://localhost:3000/api/alert-notifications | jq
```
使用上面获取的 `uid` 值作为 `-E` 输入。
```bash
./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord -E existing-notifier
```
如果要监控多个 TDengine 集群,则需要设置多个 TDinsight 仪表盘。设置非默认 TDinsight 需要进行一些更改: `-n` `-i` `-t` 选项需要更改为非默认名称,如果使用 内置短信告警功能,`-N` 和 `-L` 也应该改变。
```bash
......
......@@ -33,8 +33,10 @@ tag_set 中的所有的数据自动转化为 nchar 数据类型,并不需要
在无模式写入数据行协议中,field_set 中的每个数据项都需要对自身的数据类型进行描述。具体来说:
- 如果两边有英文双引号,表示 VARCHAR(N) 类型。例如 `"abc"`
- 如果两边有英文双引号而且带有 L 前缀,表示 NCHAR(N) 类型。例如 `L"报错信息"`
- 如果两边有英文双引号,表示 VARCHAR 类型。例如 `"abc"`
- 如果两边有英文双引号而且带有 L或l 前缀,表示 NCHAR 类型。例如 `L"报错信息"`
- 如果两边有英文双引号而且带有 G或g 前缀,表示 GEOMETRY 类型。例如 `G"Point(4.343 89.342)"`
- 如果两边有英文双引号而且带有 B或b 前缀,表示 VARBINARY 类型,双引号内可以为\x开头的16进制或者字符串。例如 `B"\x98f46e"` `B"hello"`
- 对空格、等号(=)、逗号(,)、双引号(")、反斜杠(\),前面需要使用反斜杠(\)进行转义。(都指的是英文半角符号)。具体转义规则如下:
| **序号** | **域** | **需转义字符** |
......@@ -106,6 +108,7 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
8. 为了提高写入的效率,默认假设同一个超级表中 field_set 的顺序是一样的(第一条数据包含所有的 field,后面的数据按照这个顺序),如果顺序不一样,需要配置参数 smlDataFormat 为 false,否则,数据写入按照相同顺序写入,库中数据会异常,从3.0.3.0开始,自动检测顺序是否一致,该配置废弃。
9. 由于sql建表表名不支持点号(.),所以schemaless也对点号(.)做了处理,如果schemaless自动建表的表名如果有点号(.),会自动替换为下划线(\_)。如果手动指定子表名的话,子表名里有点号(.),同样转化为下划线(\_)。
10. taos.cfg 增加 smlTsDefaultName 配置(值为字符串),只在client端起作用,配置后,schemaless自动建表的时间列名字可以通过该配置设置。不配置的话,默认为 _ts
11. 无模式写入的数据超级表或子表名区分大小写
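结合前文所述的行协议类型标注规则(VARCHAR 的双引号、NCHAR 的 L 前缀、VARBINARY 的 B 前缀、GEOMETRY 的 G 前缀),下面给出一个最小的 C 语言无模式写入示意(假设 taos 为已选定数据库的有效连接,错误处理从简):

```c
#include <stdio.h>
#include "taos.h"

// 无模式写入示意:一行行协议数据中同时使用 VARCHAR、NCHAR、VARBINARY、GEOMETRY 标注
void sml_insert_demo(TAOS *taos) {
  char *lines[] = {
      "demo_st,t1=3 c1=\"abc\",c2=L\"中文\",c3=B\"\\x98f46e\",c4=G\"Point(4.343 89.342)\" 1626006833639000000"};

  // 以行协议、纳秒时间戳精度写入一行数据
  TAOS_RES *res = taos_schemaless_insert(taos, lines, 1, TSDB_SML_LINE_PROTOCOL,
                                         TSDB_SML_TIMESTAMP_NANO_SECONDS);
  if (taos_errno(res) != 0) {
    fprintf(stderr, "schemaless insert failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);
}
```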
:::tip
无模式所有的处理逻辑,仍会遵循 TDengine 对数据结构的底层限制,例如每行数据的总长度不能超过
......
......@@ -13,12 +13,7 @@ taosKeeper 是 TDengine 3.0 版本监控指标的导出工具,通过简单的
## 安装
taosKeeper 有两种安装方式:
taosKeeper 安装方式:
- 安装 TDengine 官方安装包的同时会自动安装 taosKeeper, 详情请参考[ TDengine 安装](/operation/pkg-install)
- 单独编译 taosKeeper 并安装,详情请参考 [taosKeeper](https://github.com/taosdata/taoskeeper) 仓库。
- 安装 TDengine 官方安装包的同时会自动安装 taosKeeper
## 配置和运行方式
......
......@@ -218,11 +218,11 @@ docker run -d \
### 导入 Dashboard
在数据源配置页面,您可以为该数据源导入 TDinsight 面板,作为 TDengine 集群的监控可视化工具。如果 TDengine 服务端为 3.0 版本请选择 `TDinsight for 3.x` 导入。注意 TDinsight for 3.x 需要运行和配置 taoskeeper,相关使用说明请见 [TDinsight 用户手册](/reference/tdinsight/)。
![TDengine Database Grafana plugine import dashboard](./import_dashboard.webp)
其中适配 TDengine 2.* 的 Dashboard 已发布在 Grafana:[Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167)) 。
使用 TDengine 作为数据源的其他面板,可以[在此搜索](https://grafana.com/grafana/dashboards/?dataSource=tdengine-datasource)。以下是一份不完全列表:
......
......@@ -23,7 +23,7 @@ TDengine Source Connector 用于把数据实时地从 TDengine 读出来发送
1. Linux 操作系统
2. 已安装 Java 8 和 Maven
3. 已安装 Git、curl、vi
4. 已安装并启动 TDengine。如果还没有可参考[安装和卸载](/operation/pkg-install)
4. 已安装并启动 TDengine。
## 安装 Kafka
......
......@@ -43,7 +43,7 @@ int main(int argc, char *argv[])
taos_free_result(result);
// create table
const char* sql = "create table m1 (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, bin binary(40), blob nchar(10))";
const char* sql = "create table m1 (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, bin binary(40), blob nchar(10), varbin varbinary(16))";
result = taos_query(taos, sql);
code = taos_errno(result);
if (code != 0) {
......@@ -68,6 +68,7 @@ int main(int argc, char *argv[])
double f8;
char bin[40];
char blob[80];
int8_t varbin[16];
} v = {0};
int32_t boolLen = sizeof(int8_t);
......@@ -80,7 +81,7 @@ int main(int argc, char *argv[])
int32_t ncharLen = 30;
stmt = taos_stmt_init(taos);
TAOS_MULTI_BIND params[10];
TAOS_MULTI_BIND params[11];
params[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
params[0].buffer_length = sizeof(v.ts);
params[0].buffer = &v.ts;
......@@ -152,9 +153,19 @@ int main(int argc, char *argv[])
params[9].is_null = NULL;
params[9].num = 1;
int8_t tmp[16] = {'a', 0, 1, 13, '1'};
int32_t vbinLen = 5;
memcpy(v.varbin, tmp, sizeof(v.varbin));
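// VARBINARY 列按原始字节绑定:buffer 指向字节数组,length 指定实际写入的字节数(此处仅写入前 5 个字节)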
params[10].buffer_type = TSDB_DATA_TYPE_VARBINARY;
params[10].buffer_length = sizeof(v.varbin);
params[10].buffer = v.varbin;
params[10].length = &vbinLen;
params[10].is_null = NULL;
params[10].num = 1;
char is_null = 1;
sql = "insert into m1 values(?,?,?,?,?,?,?,?,?,?)";
sql = "insert into m1 values(?,?,?,?,?,?,?,?,?,?,?)";
code = taos_stmt_prepare(stmt, sql, 0);
if (code != 0){
printf("failed to execute taos_stmt_prepare. code:0x%x\n", code);
......@@ -162,7 +173,7 @@ int main(int argc, char *argv[])
v.ts = 1591060628000;
for (int i = 0; i < 10; ++i) {
v.ts += 1;
for (int j = 1; j < 10; ++j) {
for (int j = 1; j < 11; ++j) {
params[j].is_null = ((i == j) ? &is_null : 0);
}
v.b = (int8_t)i % 2;
......@@ -216,7 +227,7 @@ int main(int argc, char *argv[])
printf("expect two rows, but %d rows are fetched\n", rows);
}
taos_free_result(result);
// taos_free_result(result);
taos_stmt_close(stmt);
return 0;
......
......@@ -280,7 +280,7 @@ void consume_repeatly(tmq_t* tmq) {
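// 将该 vgroup 的消费位置回退到 WAL 起始 offset(begin),以便从头重新消费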
code = tmq_offset_seek(tmq, topic_name, p->vgId, p->begin);
if (code != 0) {
fprintf(stderr, "failed to seek to %ld, reason:%s", p->begin, tmq_err2str(code));
fprintf(stderr, "failed to seek to %d, reason:%s", (int)p->begin, tmq_err2str(code));
}
}
......
......@@ -272,7 +272,7 @@ typedef struct tmq_topic_assignment {
int32_t vgId;
int64_t currentOffset;
int64_t begin;
int64_t end; // The last invalidate version of wal + 1
int64_t end;
} tmq_topic_assignment;
DLL_EXPORT tmq_conf_t *tmq_conf_new();
......@@ -305,7 +305,7 @@ DLL_EXPORT int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t v
DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res);
DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res);
DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res);
DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res); // Get current offset of the result
DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res);
DLL_EXPORT const char *tmq_err2str(int32_t code);
/* ------------------------------ TAOSX -----------------------------------*/
......
......@@ -179,6 +179,8 @@ int32_t getJsonValueLen(const char* data);
int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull);
int32_t colDataReassignVal(SColumnInfoData* pColumnInfoData, uint32_t dstRowIdx, uint32_t srcRowIdx, const char* pData);
int32_t colDataSetNItems(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, uint32_t numOfRows, bool trimValue);
int32_t colDataCopyNItems(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData,
uint32_t numOfRows, bool isNull);
int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, int32_t* capacity,
const SColumnInfoData* pSource, int32_t numOfRow2);
int32_t colDataAssign(SColumnInfoData* pColumnInfoData, const SColumnInfoData* pSource, int32_t numOfRows,
......@@ -241,7 +243,7 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols);
const char* blockDecode(SSDataBlock* pBlock, const char* pData);
// for debug
char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** dumpBuf);
char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** dumpBuf, const char* taskIdStr);
int32_t buildSubmitReqFromDataBlock(SSubmitReq2** pReq, const SSDataBlock* pDataBlocks, const STSchema* pTSchema, int64_t uid, int32_t vgId,
tb_uid_t suid);
......
......@@ -102,6 +102,11 @@ extern uint16_t tsMonitorPort;
extern int32_t tsMonitorMaxLogs;
extern bool tsMonitorComp;
// audit
extern bool tsEnableAudit;
extern char tsAuditFqdn[];
extern uint16_t tsAuditPort;
// telem
extern bool tsEnableTelem;
extern int32_t tsTelemInterval;
......@@ -130,6 +135,7 @@ extern bool tsKeepColumnName;
extern bool tsEnableQueryHb;
extern bool tsEnableScience;
extern bool tsTtlChangeOnWrite;
extern int32_t tsTtlFlushThreshold;
extern int32_t tsRedirectPeriod;
extern int32_t tsRedirectFactor;
extern int32_t tsRedirectMaxPeriod;
......@@ -161,6 +167,7 @@ extern char tsCompressor[];
// tfs
extern int32_t tsDiskCfgNum;
extern SDiskCfg tsDiskCfg[];
extern int64_t tsMinDiskFreeSize;
// udf
extern bool tsStartUdfd;
......@@ -184,8 +191,11 @@ extern int64_t tsWalFsyncDataSizeLimit;
extern int32_t tsTransPullupInterval;
extern int32_t tsMqRebalanceInterval;
extern int32_t tsStreamCheckpointTickInterval;
extern int32_t tsStreamNodeCheckInterval;
extern int32_t tsTtlUnit;
extern int32_t tsTtlPushInterval;
extern int32_t tsTtlPushIntervalSec;
extern int32_t tsTtlBatchDropNum;
extern int32_t tsTrimVDbIntervalSec;
extern int32_t tsGrantHBInterval;
extern int32_t tsUptimeInterval;
......@@ -194,7 +204,6 @@ extern int32_t tsRpcRetryInterval;
extern bool tsDisableStream;
extern int64_t tsStreamBufferSize;
extern int64_t tsCheckpointInterval;
extern bool tsFilterScalarMode;
extern int32_t tsKeepTimeOffset;
extern int32_t tsMaxStreamBackendCache;
......
......@@ -97,6 +97,8 @@ int32_t dsPutDataBlock(DataSinkHandle handle, const SInputData* pInput, bool* pC
void dsEndPut(DataSinkHandle handle, uint64_t useconds);
void dsReset(DataSinkHandle handle);
/**
* Get the length of the data returned by the next call to dsGetDataBlock.
* @param handle
......