...
 
@@ -175,7 +175,7 @@ cd TDengine
```bash
mkdir debug
cd debug
-cmake .. -DBUILD_TOOLS=true
+cmake .. -DBUILD_TOOLS=true -DBUILD_CONTRIB=true
make
```
...
@@ -183,7 +183,7 @@ It equals to execute following commands:
```bash
mkdir debug
cd debug
-cmake .. -DBUILD_TOOLS=true
+cmake .. -DBUILD_TOOLS=true -DBUILD_CONTRIB=true
make
```
...
@@ -4,5 +4,5 @@ if [ ! -d debug ]; then
mkdir debug || echo -e "failed to make directory for build"
fi
-cd debug && cmake .. -DBUILD_TOOLS=true && make
+cd debug && cmake .. -DBUILD_TOOLS=true -DBUILD_CONTRIB=true && make
# apr-util
ExternalProject_Add(aprutil-1
URL https://dlcdn.apache.org//apr/apr-util-1.6.3.tar.gz
URL_HASH SHA256=2b74d8932703826862ca305b094eef2983c27b39d5c9414442e9976a9acf1983
DOWNLOAD_NO_PROGRESS 1
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
#GIT_REPOSITORY https://github.com/apache/apr-util.git
#GIT_TAG 1.5.4
SOURCE_DIR "${TD_CONTRIB_DIR}/apr-util"
#BINARY_DIR ""
BUILD_IN_SOURCE TRUE
BUILD_ALWAYS 1
#UPDATE_COMMAND ""
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1/ --with-apr=$ENV{HOME}/.cos-local.1
#CONFIGURE_COMMAND ./configure --with-apr=/usr/local/apr
BUILD_COMMAND make
INSTALL_COMMAND make install
TEST_COMMAND ""
)
# apr
ExternalProject_Add(apr-1
URL https://dlcdn.apache.org//apr/apr-1.7.4.tar.gz
URL_HASH SHA256=a4137dd82a185076fa50ba54232d920a17c6469c30b0876569e1c2a05ff311d9
DOWNLOAD_NO_PROGRESS 1
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
#GIT_REPOSITORY https://github.com/apache/apr.git
#GIT_TAG 1.5.2
SOURCE_DIR "${TD_CONTRIB_DIR}/apr"
BUILD_IN_SOURCE TRUE
UPDATE_DISCONNECTED TRUE
BUILD_ALWAYS 1
#UPDATE_COMMAND ""
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1/ --enable-shared=no
#CONFIGURE_COMMAND ./configure
BUILD_COMMAND make
INSTALL_COMMAND make install
TEST_COMMAND ""
)
cmake_minimum_required(VERSION 3.0)
set(CMAKE_VERBOSE_MAKEFILE ON)
set(TD_BUILD_TAOSA_INTERNAL FALSE)
@@ -78,6 +77,12 @@ ELSE ()
SET(TD_TAOS_TOOLS TRUE)
ENDIF ()
IF (${TD_WINDOWS})
SET(TAOS_LIB taos_static)
ELSE ()
SET(TAOS_LIB taos)
ENDIF ()
IF (TD_WINDOWS)
MESSAGE("${Yellow} set compiler flag for Windows! ${ColourReset}")
SET(COMMON_FLAGS "/w /D_WIN32 /DWIN32 /Zi /MTd")
...
@@ -125,6 +125,16 @@ option(
ON
)
IF(${TD_LINUX})
option(
BUILD_WITH_COS
"If build with cos"
ON
)
ENDIF ()
option(
BUILD_WITH_SQLITE
"If build with sqlite"
...
@@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
-SET(TD_VER_NUMBER "3.1.0.0.alpha")
+SET(TD_VER_NUMBER "3.2.0.0.alpha")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
...
# cos
ExternalProject_Add(cos
GIT_REPOSITORY https://github.com/tencentyun/cos-c-sdk-v5.git
GIT_TAG v5.0.16
SOURCE_DIR "${TD_CONTRIB_DIR}/cos-c-sdk-v5"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
TEST_COMMAND ""
)
# curl
ExternalProject_Add(curl
URL https://curl.se/download/curl-8.2.1.tar.gz
DOWNLOAD_NO_PROGRESS 1
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
#GIT_REPOSITORY https://github.com/curl/curl.git
#GIT_TAG curl-7_88_1
SOURCE_DIR "${TD_CONTRIB_DIR}/curl"
BUILD_IN_SOURCE TRUE
BUILD_ALWAYS 1
#UPDATE_COMMAND ""
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 --without-ssl --enable-shared=no --disable-ldap --disable-ldaps --without-brotli --without-zstd
#CONFIGURE_COMMAND ./configure --without-ssl
BUILD_COMMAND make
INSTALL_COMMAND make install
TEST_COMMAND ""
)
# mxml
ExternalProject_Add(mxml
GIT_REPOSITORY https://github.com/michaelrsweet/mxml.git
GIT_TAG release-2.12
SOURCE_DIR "${TD_CONTRIB_DIR}/mxml"
#BINARY_DIR ""
BUILD_IN_SOURCE TRUE
#UPDATE_COMMAND ""
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 --enable-shared=no
#CONFIGURE_COMMAND ./configure
BUILD_COMMAND make
INSTALL_COMMAND make install
TEST_COMMAND ""
)
@@ -6,6 +6,39 @@ function(cat IN_FILE OUT_FILE)
file(APPEND ${OUT_FILE} "${CONTENTS}")
endfunction(cat IN_FILE OUT_FILE)
if(${TD_LINUX})
set(CONTRIB_TMP_FILE3 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in3")
configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
if(${BUILD_WITH_COS})
file(MAKE_DIRECTORY $ENV{HOME}/.cos-local.1/)
cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
endif(${BUILD_WITH_COS})
configure_file(${CONTRIB_TMP_FILE3} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
execute_process(COMMAND "${CMAKE_COMMAND}" --build .
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
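# Note (editor's assumption): apr-util's configure step is pointed at an
# already-installed apr (--with-apr=$ENV{HOME}/.cos-local.1), so the
# dependencies are fetched and built in two passes: mxml/apr/curl first,
# then apr-util in the second pass below.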
set(CONTRIB_TMP_FILE2 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in2")
configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2})
if(${BUILD_WITH_COS})
cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2})
endif(${BUILD_WITH_COS})
configure_file(${CONTRIB_TMP_FILE2} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
execute_process(COMMAND "${CMAKE_COMMAND}" --build .
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
endif(${TD_LINUX})
set(CONTRIB_TMP_FILE "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in")
configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
@@ -122,6 +155,16 @@ if(${BUILD_WITH_SQLITE})
cat("${TD_SUPPORT_DIR}/sqlite_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif(${BUILD_WITH_SQLITE})
# cos
if(${BUILD_WITH_COS})
#cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
#cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
#cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
#cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
add_definitions(-DUSE_COS)
endif(${BUILD_WITH_COS})
# lucene
if(${BUILD_WITH_LUCENE})
cat("${TD_SUPPORT_DIR}/lucene_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
@@ -347,6 +390,31 @@ if (${BUILD_WITH_ROCKSDB})
endif()
endif()
# cos
if(${BUILD_WITH_COS})
if(${TD_LINUX})
set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local.1)
#ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a)
option(ENABLE_TEST "Enable the tests" OFF)
INCLUDE_DIRECTORIES($ENV{HOME}/.cos-local.1/include)
MESSAGE("$ENV{HOME}/.cos-local.1/include")
set(CMAKE_BUILD_TYPE debug)
set(ORIG_CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME})
set(CMAKE_PROJECT_NAME cos_c_sdk)
add_subdirectory(cos-c-sdk-v5 EXCLUDE_FROM_ALL)
target_include_directories(
cos_c_sdk
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/cos-c-sdk-v5/cos_c_sdk>
)
set(CMAKE_PROJECT_NAME ${ORIG_CMAKE_PROJECT_NAME})
else()
endif(${TD_LINUX})
endif(${BUILD_WITH_COS})
# lucene
# To support build on ubuntu: sudo apt-get install libboost-all-dev
if(${BUILD_WITH_LUCENE})
...
@@ -3,6 +3,11 @@ if(${BUILD_WITH_ROCKSDB})
add_subdirectory(rocksdb)
endif(${BUILD_WITH_ROCKSDB})
# cos
if(${BUILD_WITH_COS})
add_subdirectory(cos)
endif(${BUILD_WITH_COS})
if(${BUILD_WITH_LUCENE})
add_subdirectory(lucene)
endif(${BUILD_WITH_LUCENE})
...
add_executable(cosTest "")
target_sources(cosTest
PRIVATE
"${CMAKE_CURRENT_SOURCE_DIR}/main.c"
)
#find_path(APR_INCLUDE_DIR apr-1/apr_time.h)
#find_path(APR_UTIL_INCLUDE_DIR apr/include/apr-1/apr_md5.h)
#find_path(MINIXML_INCLUDE_DIR mxml.h)
#find_path(CURL_INCLUDE_DIR curl/curl.h)
#include_directories (${MINIXML_INCLUDE_DIR})
#include_directories (${CURL_INCLUDE_DIR})
FIND_PROGRAM(APR_CONFIG_BIN NAMES apr-config apr-1-config PATHS /usr/bin /usr/local/bin /usr/local/apr/bin/)
#FIND_PROGRAM(APU_CONFIG_BIN NAMES apu-config apu-1-config PATHS /usr/bin /usr/local/bin /usr/local/apr/bin/)
IF (APR_CONFIG_BIN)
EXECUTE_PROCESS(
COMMAND ${APR_CONFIG_BIN} --includedir
OUTPUT_VARIABLE APR_INCLUDE_DIR
OUTPUT_STRIP_TRAILING_WHITESPACE
)
ENDIF()
#IF (APU_CONFIG_BIN)
# EXECUTE_PROCESS(
# COMMAND ${APU_CONFIG_BIN} --includedir
# OUTPUT_VARIABLE APR_UTIL_INCLUDE_DIR
# OUTPUT_STRIP_TRAILING_WHITESPACE
# )
#ENDIF()
include_directories (${APR_INCLUDE_DIR})
#include_directories (${APR_UTIL_INCLUDE_DIR})
target_include_directories(
cosTest
PUBLIC "${TD_SOURCE_DIR}/contrib/cos-c-sdk-v5/cos_c_sdk"
)
#find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/)
#find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/)
#find_library(MINIXML_LIBRARY mxml)
#find_library(CURL_LIBRARY curl)
target_link_libraries(cosTest cos_c_sdk)
target_link_libraries(cosTest apr-1)
target_link_libraries(cosTest aprutil-1)
target_link_libraries(cosTest mxml)
target_link_libraries(cosTest curl)
(This diff is collapsed.)
@@ -32,6 +32,20 @@ docker run -d -p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 -p 6043-6049:6043

Note that TDengine Server 3.0 uses TCP port 6030. Port 6041 is used by taosAdapter for the REST API service. Ports 6043 through 6049 are used by taosAdapter for other connectors. You can open these ports as needed.
If you need to persist data to a specific directory on your local machine, please run the following command:
```shell
docker run -d -v ~/data/taos/dnode/data:/var/lib/taos \
-v ~/data/taos/dnode/log:/var/log/taos \
-p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 -p 6043-6049:6043-6049/udp tdengine/tdengine
```
:::note
- /var/lib/taos: TDengine's default data file directory. The location can be changed via the [configuration file]. You can also change ~/data/taos/dnode/data to any other empty local directory.
- /var/log/taos: TDengine's default log file directory. The location can be changed via the [configuration file]. You can also change ~/data/taos/dnode/log to any other empty local directory.
:::
Run the following command to ensure that your container is running:

```shell
@@ -113,4 +127,4 @@ In the query above you are selecting the first timestamp (ts) in the interval, a

## Additional Information

-For more information about deploying TDengine in a Docker environment, see [Using TDengine in Docker](../../reference/docker).
+For more information about deploying TDengine in a Docker environment, see [Deploying TDengine with Docker](../../deployment/docker).
@@ -83,7 +83,7 @@ If `maven` is used to manage the projects, what needs to be done is only adding
<dependency>
 <groupId>com.taosdata.jdbc</groupId>
 <artifactId>taos-jdbcdriver</artifactId>
- <version>3.2.1</version>
+ <version>3.2.4</version>
</dependency>
```
...
@@ -35,11 +35,12 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0

:::note

- All the data in `tag_set` will be converted to NCHAR type automatically
- Each value in `field_set` must be self-descriptive about its data type. For example, 1.2f32 means a value 1.2 of float type. Without the "f" type suffix, it will be treated as type double
- Multiple kinds of precision can be used for the `timestamp` field. Time precision can be from nanosecond (ns) to hour (h)
- The child table name is created automatically in a rule to guarantee its uniqueness. But you can configure `smlChildTableName` in taos.cfg to specify a tag value as the table name if the tag value is unique globally. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be created automatically. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored
- It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false. Otherwise, data will be written out of order and a database error will occur. (smlDataFormat defaults to false since version 3.0.1.3 and is deprecated since 3.0.3.0)

:::

For more details please refer to [InfluxDB Line Protocol](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/) and [TDengine Schemaless](/reference/schemaless/#Schemaless-Line-Protocol)
...
@@ -34,6 +34,7 @@ meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3
```

- The child table name is created automatically in a rule to guarantee its uniqueness. But you can configure `smlChildTableName` in taos.cfg to specify a tag value as the table name if the tag value is unique globally. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.

Please refer to [OpenTSDB Telnet API](http://opentsdb.net/docs/build/html/api_telnet/put.html) for more details.

## Examples

@@ -68,11 +69,11 @@ Database changed.

taos> show stables;
              name              |
=================================
-meters.current                 |
-meters.voltage                 |
+meters_current                 |
+meters_voltage                 |
Query OK, 2 row(s) in set (0.002544s)

-taos> select tbname, * from `meters.current`;
+taos> select tbname, * from `meters_current`;
             tbname             |           _ts           |         _value          | groupid |            location            |
==================================================================================================================================
 t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.249 |            10.800000000 |       3 | California.LosAngeles          |
@@ -87,5 +88,5 @@ Query OK, 4 row(s) in set (0.005399s)

If you want to query the data of `location=California.LosAngeles groupid=3`, here is the query SQL:

```sql
-SELECT * FROM `meters.current` WHERE location = "California.LosAngeles" AND groupid = 3;
+SELECT * FROM `meters_current` WHERE location = "California.LosAngeles" AND groupid = 3;
```
@@ -49,6 +49,7 @@ Please refer to [OpenTSDB HTTP API](http://opentsdb.net/docs/build/html/api_http

- In JSON protocol, strings will be converted to NCHAR type and numeric values will be converted to double type.
- The child table name is created automatically in a rule to guarantee its uniqueness. But you can configure `smlChildTableName` in taos.cfg to specify a tag value as the table name if the tag value is unique globally. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.

:::

## Examples

@@ -83,11 +84,11 @@ Database changed.

taos> show stables;
              name              |
=================================
-meters.current                 |
-meters.voltage                 |
+meters_current                 |
+meters_voltage                 |
Query OK, 2 row(s) in set (0.001954s)

-taos> select * from `meters.current`;
+taos> select * from `meters_current`;
           _ts            |         _value          |         groupid         |            location            |
===================================================================================================================
 2022-03-28 09:56:51.249 |            10.300000000 |             2.000000000 | California.SanFrancisco        |
@@ -100,5 +101,5 @@ Query OK, 2 row(s) in set (0.004076s)

If you want to query the data of "tags": {"location": "California.LosAngeles", "groupid": 1}, here is the query SQL:

```sql
-SELECT * FROM `meters.current` WHERE location = "California.LosAngeles" AND groupid = 3;
+SELECT * FROM `meters_current` WHERE location = "California.LosAngeles" AND groupid = 3;
```
@@ -52,7 +52,7 @@ CREATE TABLE d1004 USING meters TAGS ("California.LosAngeles", 3);

### Create a Stream

```sql
-create stream current_stream into current_stream_output_stb as select _wstart as start, _wend as end, max(current) as max_current from meters where voltage <= 220 interval (5s);
+create stream current_stream trigger at_once into current_stream_output_stb as select _wstart as wstart, _wend as wend, max(current) as max_current from meters where voltage <= 220 interval (5s);
```

### Write Data
...
@@ -23,7 +23,20 @@ By subscribing to a topic, a consumer can obtain the latest data in that topic i

To implement these features, TDengine indexes its write-ahead log (WAL) file for fast random access and provides configurable methods for replacing and retaining this file. You can define a retention period and size for this file. For information, see the CREATE DATABASE statement. In this way, the WAL file is transformed into a persistent storage engine that remembers the order in which events occur. However, note that configuring an overly long retention period for your WAL files makes database compression inefficient. TDengine then uses the WAL file instead of the time-series database as its storage engine for queries in the form of topics. TDengine reads the data from the WAL file; uses a unified query engine instance to perform filtering, transformations, and other operations; and finally pushes the data to consumers.
Tips (using the C interface as an example):

1. A consumer group consumes all data under the same topic; different consumer groups are independent of each other.
2. A consumer group consumes all vgroups of the same topic and can be composed of multiple consumers, but a vgroup is consumed by only one consumer. If the number of consumers exceeds the number of vgroups, the excess consumers do not consume data.
3. On the server side, only one offset is saved per vgroup. The offsets of each vgroup are monotonically increasing but not necessarily continuous, and there is no correlation between the offsets of different vgroups.
4. Each poll returns a result block that belongs to one vgroup and may contain data from multiple WAL versions. The offset of the first record in the block can be obtained through `tmq_get_vgroup_offset`.
5. If a consumer group has never committed an offset, its member consumers start consuming from the value of the parameter `auto.offset.reset` when they restart and pull data again. Within a consumer's lifecycle, the client locally records the offset of the most recently pulled data and will not pull duplicate data.
6. If a consumer terminates abnormally (without calling `tmq_close`), a rebalance of its consumer group is triggered after about 12 seconds; the consumer's status on the server changes to LOST, and after about 1 day the consumer is deleted automatically. If the consumer exits normally, it is deleted right after exiting. Adding a new consumer triggers a rebalance after about 2 seconds, after which the new consumer's status on the server changes to READY.
7. A consumer group rebalance reassigns vgroups to all consumers of the group that are in the READY state; consumers can only perform assignment/seek/commit/poll operations on the vgroups they are responsible for.
8. Consumers can call `tmq_position` to obtain the offset of the current consumption, seek to a specified offset, and consume again (see the sketch after this list).
9. Seek points the position to the specified offset without performing a commit. Once the seek succeeds, poll returns the data at the specified offset and after.
10. Before the seek operation, `tmq_get_topic_assignment` must be called; the assignment interface obtains the vgroup IDs and offset ranges of the consumer. The seek operation checks whether the vgroup ID and offset are valid and reports an error if they are not.
11. Because of the WAL expiration and deletion mechanism, the offset may already have expired when data is polled even if the seek succeeded. If the poll offset is smaller than the smallest WAL version number, consumption starts from the smallest WAL version number.
12. The `tmq_get_vgroup_offset` interface obtains the offset of the first record in the result block containing the record; seeking to this offset consumes all the data in the block (refer to point 4).
13. Data subscription consumes data from the WAL. If some WAL files are deleted according to the WAL retention policy, the deleted data can no longer be consumed. So you need to set a reasonable value for the parameter `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` when creating the database, and make sure your application consumes the data in a timely way so that no data is lost. This behavior is similar to Kafka and other widely used message queue products.
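As a minimal sketch of points 8-10 above (assuming `<taos.h>` and `<stdio.h>` are included, `tmq` is a consumer already subscribed to the placeholder topic `topic_meters`, and error handling is abbreviated), a consumer can obtain its assignment, seek each vgroup back to the earliest retained offset, and poll again:

```c
// Sketch: rewind all vgroups of a topic and poll once (points 8-10 above).
// "topic_meters" is a placeholder topic name; error handling is abbreviated.
static void rewind_and_poll(tmq_t *tmq) {
  tmq_topic_assignment *pAssign = NULL;
  int32_t numOfAssign = 0;
  if (tmq_get_topic_assignment(tmq, "topic_meters", &pAssign, &numOfAssign) == 0) {
    for (int32_t i = 0; i < numOfAssign; i++) {
      // Seek each vgroup back to the earliest offset still kept in the WAL.
      tmq_offset_seek(tmq, "topic_meters", pAssign[i].vgId, pAssign[i].begin);
    }
    tmq_free_assignment(pAssign);
  }
  TAOS_RES *pRes = tmq_consumer_poll(tmq, 1000);  // re-consume from the sought offsets
  if (pRes != NULL) {
    printf("polled vgroup %d at offset %lld\n", tmq_get_vgroup_id(pRes),
           (long long)tmq_get_vgroup_offset(pRes));
    taos_free_result(pRes);
  }
}
```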
## Data Schema and API

@@ -33,40 +46,59 @@ The related schemas and APIs in various languages are described as follows:

<TabItem value="c" label="C">

```c
typedef struct tmq_t      tmq_t;
typedef struct tmq_conf_t tmq_conf_t;
typedef struct tmq_list_t tmq_list_t;

typedef void(tmq_commit_cb(tmq_t *tmq, int32_t code, void *param));

typedef enum tmq_conf_res_t {
  TMQ_CONF_UNKNOWN = -2,
  TMQ_CONF_INVALID = -1,
  TMQ_CONF_OK = 0,
} tmq_conf_res_t;

typedef struct tmq_topic_assignment {
  int32_t vgId;
  int64_t currentOffset;
  int64_t begin;
  int64_t end;          // The last version of wal + 1
} tmq_topic_assignment;

DLL_EXPORT tmq_conf_t    *tmq_conf_new();
DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value);
DLL_EXPORT void           tmq_conf_destroy(tmq_conf_t *conf);
DLL_EXPORT void           tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param);

DLL_EXPORT tmq_list_t *tmq_list_new();
DLL_EXPORT int32_t     tmq_list_append(tmq_list_t *, const char *);
DLL_EXPORT void        tmq_list_destroy(tmq_list_t *);
DLL_EXPORT int32_t     tmq_list_get_size(const tmq_list_t *);
DLL_EXPORT char      **tmq_list_to_c_array(const tmq_list_t *);

DLL_EXPORT tmq_t    *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen);
DLL_EXPORT int32_t   tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list);
DLL_EXPORT int32_t   tmq_unsubscribe(tmq_t *tmq);
DLL_EXPORT int32_t   tmq_subscription(tmq_t *tmq, tmq_list_t **topics);
DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout);
DLL_EXPORT int32_t   tmq_consumer_close(tmq_t *tmq);
DLL_EXPORT int32_t   tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg);   // Commit the msg's offset + 1
DLL_EXPORT void      tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param);
DLL_EXPORT int32_t   tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset);
DLL_EXPORT void      tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, void *param);
DLL_EXPORT int32_t   tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment, int32_t *numOfAssignment);
DLL_EXPORT void      tmq_free_assignment(tmq_topic_assignment* pAssignment);
DLL_EXPORT int32_t   tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset);
DLL_EXPORT int64_t   tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId);   // The current offset is the offset of the last consumed message + 1
DLL_EXPORT int64_t   tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId);

DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res);
DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res);
DLL_EXPORT int32_t     tmq_get_vgroup_id(TAOS_RES *res);
DLL_EXPORT int64_t     tmq_get_vgroup_offset(TAOS_RES *res);   // Get the current offset of the result

DLL_EXPORT const char *tmq_err2str(int32_t code);
```
For more information, see [C/C++ Connector](/reference/connector/cpp).
The following example is based on the smart meter table described in Data Models. For complete sample code, see the C language section below.

</TabItem>
@@ -81,7 +113,19 @@ Set<String> subscription() throws SQLException;

ConsumerRecords<V> poll(Duration timeout) throws SQLException;
Set<TopicPartition> assignment() throws SQLException;
long position(TopicPartition partition) throws SQLException;
Map<TopicPartition, Long> position(String topic) throws SQLException;
Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;
Map<TopicPartition, OffsetAndMetadata> committed(Set<TopicPartition> partitions) throws SQLException;
void seek(TopicPartition partition, long offset) throws SQLException;
void seekToBeginning(Collection<TopicPartition> partitions) throws SQLException;
void seekToEnd(Collection<TopicPartition> partitions) throws SQLException;
void commitSync() throws SQLException;
void commitSync(Map<TopicPartition, OffsetAndMetadata> offsets) throws SQLException;

void close() throws SQLException;
```
...
@@ -887,4 +887,4 @@ The `pycumsum` function finds the cumulative sum for all data in the input colum
</details>

## Manage and Use UDF

-You need to add UDF to TDengine before using it in SQL queries. For more information about how to manage UDF and how to invoke UDF, please see [Manage and Use UDF](../12-taos-sql/26-udf.md).
+You need to add UDF to TDengine before using it in SQL queries. For more information about how to manage UDF and how to invoke UDF, please see [Manage and Use UDF](../taos-sql/udf/).
@@ -62,12 +62,13 @@ serverPort 6030

For all the dnodes in a TDengine cluster, the below parameters must be configured exactly the same; any node whose configuration differs from the dnodes already in the cluster cannot join it.

| **#** | **Parameter**    | **Definition**                                                                 |
| ----- | ---------------- | ------------------------------------------------------------------------------ |
| 1     | statusInterval   | The interval by which dnode reports its status to mnode                         |
| 2     | timezone         | Timezone                                                                        |
| 3     | locale           | System region and encoding                                                      |
| 4     | charset          | Character set                                                                   |
| 5 | ttlChangeOnWrite | Whether the ttl expiration time changes with the table modification operation |
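For example, a minimal sketch of the corresponding taos.cfg entries that must be identical on every dnode (the values shown are illustrative defaults, not recommendations):

```
statusInterval    1
timezone          UTC-8
locale            en_US.UTF-8
charset           UTF-8
ttlChangeOnWrite  0
```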
## Start Cluster

@@ -97,7 +98,7 @@ Then, on the first dnode i.e. h1.tdengine.com in our example, use TDengine CLI `
CREATE DNODE "h2.taos.com:6030";
```

This adds the end point of the new dnode (from Step 4) into the end point list of the cluster. In the command "fqdn:port" should be quoted using double quotes. Change `"h2.taos.com:6030"` to the end point of your new dnode.

Then on the first dnode h1.tdengine.com, execute `show dnodes` in `taos`
...
---
title: Deploying TDengine with Docker
sidebar_label: Docker
description: This chapter describes how to start and access TDengine in a Docker container.
---
@@ -10,8 +11,17 @@ This chapter describes how to start the TDengine service in a container and acce

The TDengine image starts with the HTTP service activated by default, using the following command:

```shell
-docker run -d --name tdengine -p 6041:6041 tdengine/tdengine
+docker run -d --name tdengine \
+  -v ~/data/taos/dnode/data:/var/lib/taos \
+  -v ~/data/taos/dnode/log:/var/log/taos \
+  -p 6041:6041 tdengine/tdengine
```
:::note
* /var/lib/taos: TDengine's default data file directory. The location can be changed via the [configuration file]. You can also change ~/data/taos/dnode/data to any other empty local directory.
* /var/log/taos: TDengine's default log file directory. The location can be changed via the [configuration file]. You can also change ~/data/taos/dnode/log to any other empty local directory.
:::
The above command starts a container named "tdengine" and maps the HTTP service port 6041 to the host port 6041. You can verify that the HTTP service provided in this container is available using the following command.
@@ -283,39 +293,38 @@ services:
    environment:
      TAOS_FQDN: "td-1"
      TAOS_FIRST_EP: "td-1"
    ports:
      - 6041:6041
      - 6030:6030
    volumes:
      # /var/lib/taos: TDengine's default data file directory. The location can be changed via [configuration file]. You can modify ~/data/taos/dnode1/data to your own data directory
      - ~/data/taos/dnode1/data:/var/lib/taos
      # /var/log/taos: TDengine's default log file directory. The location can be changed via [configure file]. You can modify ~/data/taos/dnode1/log to your own log directory
      - ~/data/taos/dnode1/log:/var/log/taos
  td-2:
    image: tdengine/tdengine:$VERSION
    environment:
      TAOS_FQDN: "td-2"
      TAOS_FIRST_EP: "td-1"
    volumes:
      - ~/data/taos/dnode2/data:/var/lib/taos
      - ~/data/taos/dnode2/log:/var/log/taos
  td-3:
    image: tdengine/tdengine:$VERSION
    environment:
      TAOS_FQDN: "td-3"
      TAOS_FIRST_EP: "td-1"
    volumes:
      - ~/data/taos/dnode3/data:/var/lib/taos
      - ~/data/taos/dnode3/log:/var/log/taos
```
:::note

- The `VERSION` environment variable is used to set the tdengine image tag
- `TAOS_FIRST_EP` must be set on the newly created instance so that it can join the TDengine cluster; if there is a high availability requirement, `TAOS_SECOND_EP` needs to be used at the same time

:::

2. Start the cluster
@@ -382,24 +391,22 @@ networks:
services:
  td-1:
    image: tdengine/tdengine:$VERSION
    environment:
      TAOS_FQDN: "td-1"
      TAOS_FIRST_EP: "td-1"
    volumes:
      # /var/lib/taos: TDengine's default data file directory. The location can be changed via [configuration file]. You can modify ~/data/taos/dnode1/data to your own data directory
      - ~/data/taos/dnode1/data:/var/lib/taos
      # /var/log/taos: TDengine's default log file directory. The location can be changed via [configure file]. You can modify ~/data/taos/dnode1/log to your own log directory
      - ~/data/taos/dnode1/log:/var/log/taos
  td-2:
    image: tdengine/tdengine:$VERSION
    environment:
      TAOS_FQDN: "td-2"
      TAOS_FIRST_EP: "td-1"
    volumes:
      - ~/data/taos/dnode2/data:/var/lib/taos
      - ~/data/taos/dnode2/log:/var/log/taos
  adapter:
    image: tdengine/tdengine:$VERSION
    entrypoint: "taosadapter"
@@ -431,11 +438,6 @@ services:
          >> /etc/nginx/nginx.conf;cat /etc/nginx/nginx.conf;
          nginx -g 'daemon off;'",
        ]
```
## Deploy with docker swarm
...
@@ -5,7 +5,7 @@ description: This document describes how to deploy a TDengine cluster on a serve

TDengine has a native distributed design and provides the ability to scale out. A few nodes can form a TDengine cluster. If you need higher processing power, you just need to add more nodes into the cluster. TDengine uses virtual node technology to virtualize a node into multiple virtual nodes to achieve load balancing. At the same time, TDengine can group virtual nodes on different nodes into virtual node groups, and use the replication mechanism to ensure the high availability of the system. The cluster feature of TDengine is completely open source.

-This document describes how to manually deploy a cluster on a host as well as how to deploy on Kubernetes and by using Helm.
+This document describes how to manually deploy a cluster on a host directly and how to deploy a cluster with Docker, Kubernetes or Helm.

```mdx-code-block
import DocCardList from '@theme/DocCardList';
...
@@ -42,7 +42,9 @@ In TDengine, the data types below can be used when specifying a column or tag.

| 14 | NCHAR | User Defined | Multi-byte string that can include multi byte characters like Chinese characters. Each character of NCHAR type consumes 4 bytes storage. The string value should be quoted with single quotes. Literal single quote inside the string must be preceded with backslash, like `\'`. The length must be specified when defining a column or tag of NCHAR type, for example nchar(10) means it can store at most 10 characters of nchar type and will consume fixed storage of 40 bytes. An error will be reported if the string value exceeds the length defined. |
| 15 | JSON | | JSON type can only be used on tags. A tag of json type is excluded with any other tags of any other type. |
| 16 | VARCHAR | User-defined | Alias of BINARY |
| 17 | GEOMETRY | User-defined | Geometry |
| 18 | VARBINARY | User-defined | Binary data with variable length |
:::note

- Each row of the table cannot be longer than 48KB (64KB since version 3.0.5.0) (note that each BINARY/NCHAR/GEOMETRY column takes up an additional 2 bytes of storage space).
@@ -57,7 +59,7 @@ In TDengine, the data types below can be used when specifying a column or tag.
| 3 | POLYGON((1.0 1.0, 2.0 2.0, 1.0 1.0)) | 13+3*16 | 13+4094*16 | +16 |

- Numeric values in SQL statements will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used, so attention must be paid to avoid overflow. For example, 9999999999999999999 will be considered as overflow because it exceeds the upper limit of long integer, but 9999999999999999999.0 will be considered as a legal float number.
- VARBINARY is a data type that stores binary data, with a maximum length of 65517 bytes and a maximum length of 16382 bytes for tag columns. Binary data can be written through SQL or schemaless interfaces (where it must be converted to a string starting with \x), or through stmt (which can use binary directly). It is displayed in hexadecimal format starting with \x (see the sketch after this note).

:::
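A minimal SQL sketch of the VARBINARY behavior described above (table and column names are illustrative):

```sql
CREATE TABLE vb_demo (ts TIMESTAMP, b VARBINARY(16));
-- written via SQL as a string starting with \x; displayed in the same hexadecimal form
INSERT INTO vb_demo VALUES (NOW, '\x7f8290');
SELECT * FROM vb_demo;
```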
## Constants
...
@@ -7,9 +7,9 @@ description: This document describes how to query data in TDengine.

## Syntax

```sql
-SELECT {DATABASE() | CLIENT_VERSION() | SERVER_VERSION() | SERVER_STATUS() | NOW() | TODAY() | TIMEZONE()}
+SELECT {DATABASE() | CLIENT_VERSION() | SERVER_VERSION() | SERVER_STATUS() | NOW() | TODAY() | TIMEZONE() | CURRENT_USER() | USER() }

-SELECT [DISTINCT] select_list
+SELECT [hints] [DISTINCT] [TAGS] select_list
from_clause
[WHERE condition]
[partition_by_clause]
@@ -21,6 +21,11 @@ SELECT [DISTINCT] select_list
[LIMIT limit_val [OFFSET offset_val]]
[>> export_file]
hints: /*+ [hint([hint_param_list])] [hint([hint_param_list])] */
hint:
BATCH_SCAN | NO_BATCH_SCAN
select_list:
select_expr [, select_expr] ...

@@ -70,6 +75,29 @@ order_expr:
{expr | position | c_alias} [DESC | ASC] [NULLS FIRST | NULLS LAST]
```
## Hints
Hints are a means of user control over query optimization for individual statements. Hints are ignored automatically if they are not applicable to the current query statement. The specific rules are as follows:

- Hints syntax starts with `/*+` and ends with `*/`; spaces are allowed before or after.
- Hints syntax can only follow the SELECT keyword.
- Each hints comment can contain multiple hints, separated by spaces. When multiple hints conflict or are identical, whichever comes first takes effect.
- When an error occurs in one of the hints, the valid hints before the error still take effect, and the erroneous and subsequent hints are ignored.
- hint_param_list are the arguments of each hint, which vary by hint.
The list of currently supported Hints is as follows:
| **Hint**      | **Params** | **Comment**            | **Scope**                      |
| :-----------: | ---------- | ---------------------- | ------------------------------ |
| BATCH_SCAN    | None       | Batch table scan       | JOIN statement for supertables |
| NO_BATCH_SCAN | None       | Sequential table scan  | JOIN statement for supertables |
For example:
```sql
SELECT /*+ BATCH_SCAN() */ a.ts FROM stable1 a, stable2 b where a.tag0 = b.tag0 and a.ts = b.ts;
```
## Lists

A query can be performed on some or all columns. Data and tag columns can all be included in the SELECT list.
@@ -167,7 +195,7 @@ The following SQL statement returns the number of subtables within the meters su
SELECT COUNT(*) FROM (SELECT DISTINCT TBNAME FROM meters);
```

In the preceding two statements, only tags can be used as filtering conditions in the WHERE clause.
**\_QSTART and \_QEND**

@@ -197,6 +225,14 @@ The \_IROWTS pseudocolumn can only be used with INTERP function. This pseudocolu
select _irowts, interp(current) from meters range('2020-01-01 10:00:00', '2020-01-01 10:30:00') every(1s) fill(linear);
```
### TAGS Query
When only tag columns are specified, the TAGS keyword returns the tag columns of all child tables; one row of tag values is returned for each child table.
```sql
SELECT TAGS tag_name [, tag_name ...] FROM stb_name
```
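For example, assuming the `meters` supertable with tags `location` and `groupid` used elsewhere in this document, the following returns one row of tag values per child table:

```sql
SELECT TAGS location, groupid FROM meters;
```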
## Query Objects

`FROM` can be followed by a number of tables or super tables, or can be followed by a sub-query.
@@ -209,8 +245,7 @@ You can perform INNER JOIN statements based on the primary key. The following co
3. For supertables, the ON condition must be equivalent to the primary key. In addition, the tag columns of the tables on which the INNER JOIN is performed must have a one-to-one relationship. You cannot specify an OR condition.
4. The tables that are included in a JOIN clause must be of the same type (supertable, standard table, or subtable).
5. You can include subqueries before and after the JOIN keyword.
-6. You cannot include more than ten tables in a JOIN clause.
-7. You cannot include a FILL clause and a JOIN clause in the same statement.
+6. You cannot include a FILL clause and a JOIN clause in the same statement.
## GROUP BY

@@ -301,6 +336,12 @@ SELECT TODAY();
SELECT TIMEZONE();
```
### Obtain Current User
```sql
SELECT CURRENT_USER();
```
## Regular Expression

### Syntax

@@ -355,7 +396,7 @@ SELECT AVG(CASE WHEN voltage < 200 or voltage > 250 THEN 220 ELSE voltage END) F
## JOIN

-TDengine supports the `INNER JOIN` based on the timestamp primary key, that is, the `JOIN` condition must contain the timestamp primary key. As long as the requirement of timestamp-based primary key is met, `INNER JOIN` can be made between normal tables, sub-tables, super tables and sub-queries at will, and there is no limit on the number of tables.
+TDengine supports the `INNER JOIN` based on the timestamp primary key, that is, the `JOIN` condition must contain the timestamp primary key. As long as the requirement of timestamp-based primary key is met, `INNER JOIN` can be made between normal tables, sub-tables, super tables and sub-queries at will, and there is no limit on the number of tables; the primary-key condition and other conditions must be combined with the `AND` operator.

For standard tables:
...
@@ -49,3 +49,5 @@ You can also add filter conditions to limit the results.

6. You can't create an index on a normal table or a child table.
7. If a tag column has too few unique values, it is better not to create an index on it; the benefit would be very small.
8. A newly created supertable randomly generates an index name for the first tag column, composed of the first tag column's name plus 23 random bytes; this index can be rebuilt or dropped (see the sketch below).
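A minimal sketch of creating and dropping a tag index (index and table names are illustrative; see the CREATE INDEX syntax earlier in this section):

```sql
-- create an index on the location tag of the meters supertable
CREATE INDEX idx_location ON meters (location);
-- drop it again
DROP INDEX idx_location;
```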
@@ -402,7 +402,7 @@ CAST(expr AS type_name)

**Return value type**: The type specified by parameter `type_name`

**Applicable data types**: All data types except JSON and VARBINARY. If type_name is VARBINARY, expr can only be VARCHAR.

**Nested query**: It can be used in both the outer query and inner query in a nested query.
...@@ -698,7 +698,7 @@ ELAPSED(ts_primary_key [, time_unit])
LEASTSQUARES(expr, start_val, step_val)
```
**Description**: The linear regression function of a specified column. `start_val` is the initial value and `step_val` is the step value.
**Return value type**: A string in the format of "(slope, intercept)"
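For example, assuming a hypothetical subtable `d0` with a numeric `voltage` column, starting at 1 with step 1:
```sql
SELECT LEASTSQUARES(voltage, 1, 1) FROM d0;
```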
...@@ -1274,3 +1274,169 @@ SELECT SERVER_STATUS();
```
**Description**: The server status.
### CURRENT_USER
```sql
SELECT CURRENT_USER();
```
**Description**: Get the current user.
## Geometry Functions
### Geometry Input Functions
Geometry input functions create geometry data from WKT (Well-Known Text).
#### ST_GeomFromText
```sql
ST_GeomFromText(VARCHAR WKT expr)
```
**Description**: Return a specified GEOMETRY value from Well-Known Text representation (WKT).
**Return value type**: GEOMETRY
**Applicable data types**: VARCHAR
**Applicable table types**: standard tables and supertables
**Explanations**:
- The input can be one of the WKT strings, such as POINT, LINESTRING, POLYGON, MULTIPOINT, MULTILINESTRING, MULTIPOLYGON, and GEOMETRYCOLLECTION.
- The output is the GEOMETRY data type, internally defined as a binary string.
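A minimal usage sketch, assuming a hypothetical table `geo` with at least one row (the WKT value here is a literal):
```sql
SELECT ST_GeomFromText('POINT(1.0 1.0)') FROM geo;
```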
### Geometry Output Functions
Geometry output functions convert geometry data into WKT.
#### ST_AsText
```sql
ST_AsText(GEOMETRY geom)
```
**Description**: Return a specified Well-Known Text representation (WKT) value from GEOMETRY data.
**Return value type**: VARCHAR
**Applicable data types**: GEOMETRY
**Applicable table types**: standard tables and supertables
**Explanations**:
- The output can be one of the WKT strings, such as POINT, LINESTRING, POLYGON, MULTIPOINT, MULTILINESTRING, MULTIPOLYGON, and GEOMETRYCOLLECTION.
### Geometry Relationships Functions
Geometry relationships functions determine spatial relationships between geometries.
#### ST_Intersects
```sql
ST_Intersects(GEOMETRY geomA, GEOMETRY geomB)
```
**Description**: Compares two geometries and returns true if they intersect.
**Return value type**: BOOL
**Applicable data types**: GEOMETRY, GEOMETRY
**Applicable table types**: standard tables and supertables
**Explanations**:
- Geometries intersect if they have any point in common.
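A minimal sketch, assuming a hypothetical table `geo` with at least one row; the two literal line segments cross, so the result is true:
```sql
SELECT ST_Intersects(ST_GeomFromText('LINESTRING(0 0, 2 2)'),
                     ST_GeomFromText('LINESTRING(0 2, 2 0)')) FROM geo;
```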
#### ST_Equals
```sql
ST_Equals(GEOMETRY geomA, GEOMETRY geomB)
```
**Description**: Returns TRUE if the given geometries are "spatially equal".
**Return value type**: BOOL
**Applicable data types**: GEOMETRY, GEOMETRY
**Applicable table types**: standard tables and supertables
**Explanations**:
- 'Spatially equal' means ST_Contains(A,B) = true and ST_Contains(B,A) = true; the ordering of points may differ, but they represent the same geometry structure.
#### ST_Touches
```sql
ST_Touches(GEOMETRY geomA, GEOMETRY geomB)
```
**Description**: Returns TRUE if A and B intersect, but their interiors do not intersect.
**Return value type**: BOOL
**Applicable data types**: GEOMETRY, GEOMETRY
**Applicable table types**: standard tables and supertables
**Explanations**:
- A and B have at least one point in common, and the common points lie on the boundary of at least one of them.
- For Point/Point inputs the relationship is always FALSE, since points do not have a boundary.
#### ST_Covers
```sql
ST_Covers(GEOMETRY geomA, GEOMETRY geomB)
```
**Description**: Returns TRUE if every point in Geometry B lies inside (intersects the interior or boundary of) Geometry A.
**Return value type**: BOOL
**Applicable data types**: GEOMETRY, GEOMETRY
**Applicable table types**: standard tables and supertables
**Explanations**:
- A covers B means no point of B lies outside (in the exterior of) A.
#### ST_Contains
```sql
ST_Contains(GEOMETRY geomA, GEOMETRY geomB)
```
**Description**: Returns TRUE if geometry A contains geometry B.
**Return value type**: BOOL
**Applicable data types**: GEOMETRY, GEOMETRY
**Applicable table types**: standard tables and supertables
**Explanations**:
- A contains B if and only if all points of B lie inside (i.e. in the interior or boundary of) A (or equivalently, no points of B lie in the exterior of A), and the interiors of A and B have at least one point in common.
#### ST_ContainsProperly
```sql
ST_ContainsProperly(GEOMETRY geomA, GEOMETRY geomB)
```
**Description**: Returns TRUE if every point of B lies in the interior of A.
**Return value type**: BOOL
**Applicable data types**: GEOMETRY, GEOMETRY
**Applicable table types**: standard tables and supertables
**Explanations**:
- No point of B lies on the boundary of A or in the exterior of A.
...@@ -168,3 +168,11 @@ All [scalar functions](../function/#scalar-functions) are available in stream pr
- [unique](../function/#unique)
- [mode](../function/#mode)
## Pause and Resume a Stream
1. Pause a stream
```sql
PAUSE STREAM [IF EXISTS] stream_name;
```
If `IF EXISTS` is not specified and the stream does not exist, an error is reported. If `IF EXISTS` is specified and the stream does not exist, success is returned. If the stream exists, all of its stream tasks are paused.
2. Resume a stream
```sql
RESUME STREAM [IF EXISTS] [IGNORE UNTREATED] stream_name;
```
If `IF EXISTS` is not specified and the stream does not exist, an error is reported. If `IF EXISTS` is specified and the stream does not exist, success is returned. If the stream exists, all of its stream tasks are resumed. If `IGNORE UNTREATED` is specified, data written while the stream was paused is ignored when the stream resumes.
...@@ -54,7 +54,7 @@ LIKE is used together with wildcards to match strings. Its usage is described as
MATCH and NMATCH are used together with regular expressions to match strings. Their usage is described as follows:
- Use POSIX regular expression syntax. For more information, see Regular Expressions.
- Regular expressions can be used only against table names (i.e. `tbname`) and tags/columns of binary/nchar types.
- The maximum length of a regular expression string is 128 bytes. The configuration parameter `maxRegexStringLen` can be used to set the maximum allowed length of a regular expression. It is a client-side configuration parameter and takes effect after restarting the client.
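A short sketch, assuming a hypothetical supertable `meters` whose subtable names start with `d` followed by digits:
```sql
SELECT COUNT(*) FROM meters WHERE tbname MATCH '^d[0-9]+';
```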
## Logical Operators
...
...@@ -178,6 +178,7 @@ The following list shows all reserved keywords:
- MATCH
- MAX_DELAY
- BWLIMIT
- MAXROWS
- MERGE
- META
...
...@@ -98,7 +98,7 @@ Provides information about user-created databases. Similar to SHOW DATABASES.
| 21 | cachesize | INT | Memory per vnode used for caching the newest data. It should be noted that `cachesize` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 22 | wal_level | INT | WAL level. It should be noted that `wal_level` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 23 | wal_fsync_period | INT | Interval at which WAL is written to disk. It should be noted that `wal_fsync_period` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 24 | wal_retention_period | INT | WAL retention period, in seconds. It should be noted that `wal_retention_period` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 25 | wal_retention_size | INT | Maximum WAL size. It should be noted that `wal_retention_size` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 26 | stt_trigger | SMALLINT | The threshold for number of files to trigger file merging. It should be noted that `stt_trigger` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 27 | table_prefix | SMALLINT | The prefix length in the table name that is ignored when distributing table to vnode based on table name. It should be noted that `table_prefix` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
...@@ -297,3 +297,13 @@ Provides dnode configuration information.
| 7 | target_table | BINARY(192) | Target table |
| 8 | watermark | BIGINT | Watermark (see stream processing documentation). It should be noted that `watermark` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9 | trigger | INT | Method of triggering the result push (see stream processing documentation). It should be noted that `trigger` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
## INS_USER_PRIVILEGES
| # | **Column** | **Data Type** | **Description** |
| --- | :----------: | ------------ | -------------------------------------------|
| 1 | user_name | VARCHAR(24) | Username |
| 2 | privilege | VARCHAR(10) | Privilege description |
| 3 | db_name | VARCHAR(65) | Database name |
| 4 | table_name | VARCHAR(193) | Table name |
| 5 | condition | VARCHAR(49152) | The privilege filter for child tables |
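For example, the view can be queried like any other table in `information_schema`:
```sql
SELECT * FROM information_schema.ins_user_privileges;
```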
...@@ -22,6 +22,14 @@ SHOW CLUSTER;
Shows information about the current cluster.
## SHOW CLUSTER ALIVE
```sql
SHOW CLUSTER ALIVE;
```
It is used to check whether the cluster is available. Return values: 0 means unavailable; 1 means available; 2 means partially available (some dnodes are offline, but the other dnodes are available).
## SHOW CONNECTIONS
```sql
...
...@@ -4,7 +4,7 @@ sidebar_label: Access Control
description: This document describes how to manage users and permissions in TDengine.
---
This document describes how to manage permissions in TDengine. User and access control is a distinguishing feature of the TDengine enterprise edition. This section demonstrates only the most fundamental functionalities of user and access control. To learn more about user and access control, please contact the TDengine team.
## Create a User
...
...@@ -19,6 +19,9 @@ index_option:
functions:
function [, function] ...
```
### Tag Indexing
See [Tag Index](../tag-index).
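A minimal sketch, assuming a hypothetical supertable `st1` with a tag column `t1` (see the linked page for the complete syntax):
```sql
CREATE INDEX idx_t1 ON st1 (t1);
DROP INDEX idx_t1;
```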
### SMA Indexing
...
...@@ -17,7 +17,7 @@ You can use the SHOW CONNECTIONS statement to find the conn_id.
## Terminate a Query
```sql
KILL QUERY 'kill_id';
```
You can use the SHOW QUERIES statement to find the kill_id.
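A typical flow is to look up the ID first and then terminate the query; the ID value below is a hypothetical placeholder:
```sql
SHOW QUERIES;
KILL QUERY 'kill_id_from_show_queries';
```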
...
...@@ -79,6 +79,12 @@ Parameter Description:
- tz: Optional parameter that specifies the timezone of the returned time, following the IANA Time Zone rules, e.g. `America/New_York`.
- req_id: Optional parameter that specifies the request id for tracing.
:::note
URL encoding: make sure that parameters are properly encoded. For example, when specifying a timezone you must properly encode special characters. `?tz=Etc/GMT+10` will not work because the plus symbol (+) is interpreted as a space in the URL. It's best practice to encode all special characters in a parameter; use `?tz=Etc%2FGMT%2B10` instead.
:::
For example, `http://h1.taos.com:6041/rest/sql/test` is a URL to `h1.taos.com:6041` and sets the default database name to `test`.
TDengine supports both Basic authentication and custom authentication mechanisms, and subsequent versions will provide a standard secure digital signature mechanism for authentication.
...
...@@ -135,7 +135,7 @@ The following describes the basic API, synchronous API, asynchronous API, subscr
The base API is used to do things like create database connections and provide a runtime environment for the execution of other APIs.
- `int taos_init()`
Initializes the runtime environment. If the API is not actively called, the driver will automatically call the API when `taos_connect()` is called, so the program generally does not need to call it manually.
...@@ -168,6 +168,12 @@ The base API is used to do things like create database connections and provide a
:::
- `TAOS *taos_connect_auth(const char *host, const char *user, const char *auth, const char *db, uint16_t port)`
This function is the same as `taos_connect`, except that the `pass` parameter is replaced by `auth`; the other parameters are the same as those of `taos_connect`.
- auth: the MD5 hash of the raw password, as a 32-character lowercase hexadecimal string
- `char *taos_get_server_info(TAOS *taos)`
Get server-side version information.
...@@ -184,6 +190,14 @@ The base API is used to do things like create database connections and provide a
- If len is less than the space required to store the db name (including the terminating '\0'), an error is returned; the truncated db name that is assigned still ends with '\0'.
- If len is greater than or equal to the space required to store the db name (including the terminating '\0'), 0 is returned and the db name, ending with '\0', is assigned.
- `int taos_set_notify_cb(TAOS *taos, __taos_notify_fn_t fp, void *param, int type)`
Set the event callback function.
- fp: event callback function pointer. Declaration: `typedef void (*__taos_notify_fn_t)(void *param, void *ext, int type);`. `param` is a user-defined parameter, `ext` is an extended parameter (its meaning depends on the event type; for TAOS_NOTIFY_PASSVER it carries the user password version), and `type` is the event type.
- param: user-defined parameter
- type: event type. Valid value: TAOS_NOTIFY_PASSVER (user password changed)
- `void taos_close(TAOS *taos)`
Closes the connection, where `taos` is the handle returned by `taos_connect()`.
...@@ -307,21 +321,20 @@ The specific functions related to the interface are as follows (see also the [pr
Parse a SQL command, and bind the parsed result and parameter information to `stmt`. If the parameter length is greater than 0, use this parameter as the length of the SQL command. If it is equal to 0, the length of the SQL command will be determined automatically.
- `int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind)`
Not as efficient as `taos_stmt_bind_param_batch()`, but can support non-INSERT type SQL statements.
To bind parameters, bind points to an array (representing the row of data to be bound), making sure that the number and order of the elements in this array are the same as the parameters in the SQL statement. TAOS_MULTI_BIND is used similarly to MYSQL_BIND in MySQL, as defined below.
```c
typedef struct TAOS_MULTI_BIND {
  int       buffer_type;
  void     *buffer;
  uintptr_t buffer_length;
  uint32_t *length;
  char     *is_null;
  int       num;
} TAOS_MULTI_BIND;
```
- `int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name)`
...@@ -329,7 +342,7 @@ The specific functions related to the interface are as follows (see also the [pr
(Available in 2.1.1.0 and later versions, only supported for replacing parameter values in INSERT statements)
When the table name in the SQL command uses the `?` placeholder, you can use this function to bind a specific table name.
- `int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_MULTI_BIND* tags)`
(Available in 2.1.2.0 and later versions, only supported for replacing parameter values in INSERT statements)
When the table name and TAGS in the SQL command both use the `?` placeholder, you can use this function to bind the specific table name and the specific TAGS values. The most typical usage scenario is an INSERT statement that uses the automatic table creation function (the current version does not support specifying specific TAGS columns). The number of columns in the TAGS parameter needs to be the same as the number of TAGS requested in the SQL command.
...@@ -358,6 +371,14 @@ The specific functions related to the interface are as follows (see also the [pr
Execute the prepared statement. Currently, a statement can only be executed once.
- `int taos_stmt_affected_rows(TAOS_STMT *stmt)`
Gets the number of rows affected by executing bind statements multiple times.
- `int taos_stmt_affected_rows_once(TAOS_STMT *stmt)`
Gets the number of rows affected by executing a bind statement once.
- `TAOS_RES* taos_stmt_use_result(TAOS_STMT *stmt)`
Gets the result set of a statement. Use the result set in the same way as in the non-parametric call. When finished, `taos_free_result()` should be called on this result set to free resources.
...@@ -378,7 +399,7 @@ In addition to writing data using the SQL method or the parameter binding API, w
- `TAOS_RES* taos_schemaless_insert(TAOS* taos, const char* lines[], int numLines, int protocol, int precision)`
**Function description**
- This interface writes the text data of the line protocol to TDengine.
**Parameter description**
- taos: database connection, established by the `taos_connect()` function.
...@@ -387,12 +408,13 @@ In addition to writing data using the SQL method or the parameter binding API, w
- protocol: the protocol type of the lines, used to identify the text data format.
- precision: precision string for the timestamp in the text data.
**Return value**
- A TAOS_RES structure; the application can get the error message with `taos_errstr()` and the error code with `taos_errno()`.
In some cases, the returned TAOS_RES is `NULL`, and it is still possible to call `taos_errno()` to safely get the error code information.
The returned TAOS_RES needs to be freed by the caller in order to avoid memory leaks.
**Description**
The protocol type is enumerated and contains the following three formats.
- TSDB_SML_LINE_PROTOCOL: InfluxDB line protocol (Line Protocol)
...@@ -427,3 +449,90 @@ In addition to writing data using the SQL method or the parameter binding API, w
- The _raw interfaces pass the data through the parameters lines and len, in order to solve the problem that data containing '\0' is truncated by the original interfaces. The totalRows pointer returns the number of parsed data rows.
- The _ttl interfaces can pass the ttl parameter to control the TTL expiration time of the table.
- The _reqid interfaces can track the entire call chain by passing the reqid parameter.
### Subscription API
- `int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment, int32_t *numOfAssignment)`
- `void tmq_free_assignment(tmq_topic_assignment* pAssignment)`
`tmq_topic_assignment` is defined as follows:
```c
typedef struct tmq_topic_assignment {
int32_t vgId;
int64_t currentOffset;
int64_t begin;
int64_t end;
} tmq_topic_assignment;
```
**Function description**
- tmq_get_topic_assignment gets the current vgroup assignment of this consumer.
**Parameter description**
- numOfAssignment: the number of vgroups assigned to this consumer
- assignment: the information of the vgroups, which must be freed by tmq_free_assignment
**Return value**
- Zero on success; nonzero on failure. The error message can be obtained through `char *tmq_err2str(int32_t code)`.
- `int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId)`
**Function description**
- Gets the committed offset.
**Return value**
- The committed offset; -2147467247 means there is no committed value; other values less than 0 indicate failure.
- `int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg)`
- `void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param)`
- `int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset)`
- `void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, void *param)`
**Function description**
- The commit interfaces are divided into two types, each with synchronous and asynchronous variants:
- The first type commits based on a message, committing the progress carried in that message; if the message is NULL, the current progress of all vgroups consumed by the current consumer is committed: tmq_commit_sync/tmq_commit_async
- The second type commits based on the offset of a vgroup in a topic: tmq_commit_offset_sync/tmq_commit_offset_async
**Parameter description**
- msg: the consumed message; if NULL, the current progress of all vgroups consumed by the current consumer is committed
**Return value**
- Zero on success; nonzero on failure. The error message can be obtained through `char *tmq_err2str(int32_t code)`.
- `int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId)`
**Function description**
- Obtains the current consumption position, that is, the position right after the data that has been consumed.
**Return value**
- The current consumption position on success; a value less than 0 indicates failure. The error message can be obtained through `char *tmq_err2str(int32_t code)`.
- `int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset)`
**Function description**
- Sets the offset of the consumer in a vgroup of a certain topic, from which consumption will start.
**Return value**
- Zero on success; nonzero on failure. The error message can be obtained through `char *tmq_err2str(int32_t code)`.
- `int64_t tmq_get_vgroup_offset(TAOS_RES *res)`
**Function description**
- Obtains the starting offset of the consumed data.
**Parameter description**
- res: the consumed message
**Return value**
- The starting offset of the consumed data; a value less than 0 indicates failure. The error message can be obtained through `char *tmq_err2str(int32_t code)`.
- `int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics)`
**Function description**
- Obtains the list of topics subscribed by the consumer.
**Parameter description**
- topics: the list of topics subscribed by the consumer, which must be freed by tmq_list_destroy
**Return value**
- Zero on success; nonzero on failure. The error message can be obtained through `char *tmq_err2str(int32_t code)`.
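A minimal C sketch tying these interfaces together: fetch the assignment for a topic and seek every vgroup back to its beginning offset. It assumes an already-created consumer `tmq` subscribed to a hypothetical topic; only functions documented above are used.
```c
#include <stdio.h>
#include "taos.h"

// Seek all vgroups of `topic` back to their earliest available offset.
// `tmq` is assumed to be a consumer already subscribed to this topic.
static int seek_to_beginning(tmq_t *tmq, const char *topic) {
  tmq_topic_assignment *assignment = NULL;
  int32_t numOfAssignment = 0;

  int32_t code = tmq_get_topic_assignment(tmq, topic, &assignment, &numOfAssignment);
  if (code != 0) {
    fprintf(stderr, "get assignment failed: %s\n", tmq_err2str(code));
    return -1;
  }

  for (int32_t i = 0; i < numOfAssignment; i++) {
    // assignment[i].begin is the earliest offset of this vgroup
    code = tmq_offset_seek(tmq, topic, assignment[i].vgId, assignment[i].begin);
    if (code != 0) {
      fprintf(stderr, "seek vgId %d failed: %s\n", assignment[i].vgId, tmq_err2str(code));
    }
  }

  tmq_free_assignment(assignment);
  return 0;
}
```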
...@@ -36,6 +36,7 @@ REST connection supports all platforms that can run Java.
| taos-jdbcdriver version | major changes | TDengine version |
| :---------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------: |
| 3.2.5 | Subscription add committed() and assignment() method | 3.1.0.3 or later |
| 3.2.4 | Subscription add the enable.auto.commit parameter and the unsubscribe() method in the WebSocket connection | - |
| 3.2.3 | Fixed resultSet data parsing failure in some cases | - |
| 3.2.2 | Subscription add seek function | 3.0.5.0 or later |
...@@ -1019,14 +1020,19 @@ while(true) {
#### Assignment subscription Offset
```java
// get topicPartition
Set<TopicPartition> assignment() throws SQLException;
// get offset
long position(TopicPartition partition) throws SQLException;
Map<TopicPartition, Long> position(String topic) throws SQLException;
Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;
Map<TopicPartition, OffsetAndMetadata> committed(Set<TopicPartition> partitions) throws SQLException;
// Overrides the fetch offsets that the consumer will use on the next poll(timeout).
void seek(TopicPartition partition, long offset) throws SQLException;
void seekToBeginning(Collection<TopicPartition> partitions) throws SQLException;
void seekToEnd(Collection<TopicPartition> partitions) throws SQLException;
```
Example usage is as follows.
...@@ -1052,6 +1058,18 @@ try (TaosConsumer<ResultBean> consumer = new TaosConsumer<>(properties)) {
}
```
#### Commit offset
If `enable.auto.commit` is false, offsets can be committed manually.
```java
void commitSync() throws SQLException;
void commitSync(Map<TopicPartition, OffsetAndMetadata> offsets) throws SQLException;
// async commit only supports the JNI connection
void commitAsync(OffsetCommitCallback<V> callback) throws SQLException;
void commitAsync(Map<TopicPartition, OffsetAndMetadata> offsets, OffsetCommitCallback<V> callback) throws SQLException;
```
#### Close subscriptions
```java
...@@ -1091,8 +1109,6 @@ public abstract class ConsumerLoop {
config.setProperty("client.id", "1");
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
config.setProperty("value.deserializer.encoding", "UTF-8");
this.consumer = new TaosConsumer<>(config);
this.topics = Collections.singletonList("topic_speed");
...@@ -1176,7 +1192,6 @@ public abstract class ConsumerLoop {
config.setProperty("client.id", "1");
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
config.setProperty("value.deserializer.encoding", "UTF-8");
this.consumer = new TaosConsumer<>(config);
this.topics = Collections.singletonList("topic_speed");
...
...@@ -373,7 +373,7 @@ conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (locat
<TabItem value="websocket" label="WebSocket connection">
```python
conn = taosws.connect("taosws://localhost:6041")
# Execute a sql, ignore the result set, just get affected rows. It's useful for DDL and DML statement.
conn.execute("DROP DATABASE IF EXISTS test")
conn.execute("CREATE DATABASE test")
...
...@@ -30,6 +30,10 @@ The source code of `TDengine.Connector` is hosted on [GitHub](https://github.com
The supported platforms are the same as those supported by the TDengine client driver.
:::note
Please note that TDengine no longer supports 32-bit Windows.
:::
## Version support
Please refer to [version support list](/reference/connector#version-support)
...
---
toc_max_heading_level: 4
sidebar_label: R
title: R Language Connector
---
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
import Rdemo from "../../07-develop/01-connect/_connect_r.mdx"
By using the RJDBC library in R, you can enable R programs to access TDengine data. Here are the installation process, configuration steps, and an example code in R.
## Installation Process
Before getting started, make sure you have installed the R language environment. Then, follow these steps to install and configure the RJDBC library:
1. Install the Java Development Kit (JDK): the RJDBC library requires a Java environment. Download the appropriate JDK for your operating system from the official Oracle website and follow the installation guide.
2. Install the RJDBC library: Execute the following command in the R console to install the RJDBC library.
```r
install.packages("RJDBC", repos='http://cran.us.r-project.org')
```
:::note
1. The default R package (version 4.2) shipped with Ubuntu may cause an unresponsiveness bug. Please install the latest version of R from the [official website](https://www.r-project.org/).
2. On Linux systems, installing the RJDBC package may require installing components needed for compilation. For example, on Ubuntu, you can execute `apt install -y libbz2-dev libpcre2-dev libicu-dev` to install the required components.
3. On Windows systems, you need to set the **JAVA_HOME** environment variable.
:::
3. Download the TDengine JDBC driver: Visit the Maven website and download the TDengine JDBC driver (taos-jdbcdriver-X.X.X-dist.jar) to your local machine.
## Configuration Process
Once you have completed the installation steps, you need to do some configuration to enable the RJDBC library to connect and access the TDengine time-series database.
1. Load the RJDBC library and other necessary libraries in your R script:
```r
library(DBI)
library(rJava)
library(RJDBC)
```
2. Set the JDBC driver and JDBC URL:
```r
# Set the JDBC driver path (specify the location on your local machine)
driverPath <- "/path/to/taos-jdbcdriver-X.X.X-dist.jar"
# Set the JDBC URL (specify the FQDN and credentials of your TDengine cluster)
url <- "jdbc:TAOS://localhost:6030/?user=root&password=taosdata"
```
3. Load the JDBC driver:
```r
# Load the JDBC driver
drv <- JDBC("com.taosdata.jdbc.TSDBDriver", driverPath)
```
4. Create a TDengine database connection:
```r
# Create a database connection
conn <- dbConnect(drv, url)
```
5. Once the connection is established, you can use the `conn` object for various database operations such as querying data and inserting data.
6. Finally, don't forget to close the database connection after you are done:
```r
# Close the database connection
dbDisconnect(conn)
```
## Example Code Using RJDBC in R
Here's an example code that uses the RJDBC library to connect to a TDengine time-series database and perform a query operation:
<Rdemo/>
Please modify the JDBC driver, JDBC URL, username, password, and SQL query statement according to your specific TDengine time-series database environment and requirements.
By following the steps and using the provided example code, you can use the RJDBC library in the R language to access the TDengine time-series database and perform tasks such as data querying and analysis.
...@@ -102,6 +102,8 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
-L, --loose-mode Use loose mode if the table name and column name
use letter and number only. Default is NOT.
-n, --no-escape No escape char '`'. Default is using it.
-Q, --dot-replace Replace dot character with underline character in
the table name.
-T, --thread-num=THREAD_NUM Number of thread for dump in file. Default is
8.
-C, --cloud=CLOUD_DSN specify a DSN to access TDengine cloud service
...
...@@ -13,7 +13,7 @@ After TDengine starts, it automatically writes many metrics in specific interval
To deploy TDinsight, we need
- a single-node TDengine server or a multi-node TDengine cluster and a [Grafana] server are required. This dashboard requires TDengine 3.0.1.0 and above, with the monitoring feature enabled. For detailed configuration, please refer to [TDengine monitoring configuration](../config/#monitoring-parameters).
- taosAdapter has been installed and running, please refer to [taosAdapter](../taosadapter).
- taosKeeper has been installed and running. Please note that the monitor-related items in the taos.cfg file need to be configured. Refer to [taosKeeper](../taosKeeper) for details.
Please record
- The endpoint of taosAdapter REST service, for example `http://tdengine.local:6041`
...@@ -80,7 +80,7 @@ chmod +x TDinsight.sh
./TDinsight.sh
```
This script will automatically download the latest [Grafana TDengine data source plugin](https://github.com/taosdata/grafanaplugin/releases/latest) and [TDinsight dashboard](https://github.com/taosdata/grafanaplugin/blob/master/dashboards/TDinsightV3.json) with configurable parameters for command-line options to the [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) configuration file to automate deployment and updates, etc.
Assume you use TDengine and Grafana's default services on the same host. Run `./TDinsight.sh` and open the Grafana browser window to see the TDinsight dashboard.
...@@ -112,9 +112,6 @@ Install and configure TDinsight dashboard in Grafana on Ubuntu 18.04/20.04 syste
-i, --tdinsight-uid <string> Replace with a non-space ASCII code as the dashboard id. [default: tdinsight]
-t, --tdinsight-title <string> Dashboard title. [default: TDinsight]
-e, --tdinsight-editable If the provisioning dashboard could be editable. [default: false]
```
Most command-line options can take effect the same as environment variables.
...@@ -132,7 +129,10 @@ Most command-line options can take effect the same as environment variables.
| -i | --tdinsight-uid | TDINSIGHT_DASHBOARD_UID | TDinsight `uid` of the dashboard. [default: tdinsight] |
| -t | --tdinsight-title | TDINSIGHT_DASHBOARD_TITLE | TDinsight dashboard title. [Default: TDinsight] |
| -e | --tdinsight-editable | TDINSIGHT_DASHBOARD_EDITABLE | If the dashboard is configured to be editable. [Default: false] |
:::note
The `-E` option is deprecated; use the Grafana unified alerting function instead.
:::
Suppose you start a TDengine database on host `tdengine` with HTTP API port `6041`, user `root1`, and password `pass5ord`. Execute the script.
...@@ -140,18 +140,6 @@ Suppose you start a TDengine database on host `tdengine` with HTTP API port `604
sudo ./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord
```
If you want to monitor multiple TDengine clusters, you need to set up numerous TDinsight dashboards. Setting up non-default TDinsight requires some changes: the `-n` `-i` `-t` options need to be changed to non-default names, and `-N` and `-L` should also be changed if using the built-in SMS alerting feature.
```bash
...
...@@ -81,6 +81,14 @@ For example:
taos -h h1.taos.com -s "use db; show tables;"
```
## Export query results to a file
- You can use ">>" to export query results to a file; the syntax is `select * from table >> file`. If only a file name without a path is given, the file is generated under the current working directory of the TDengine CLI.
## Import data from CSV file
- You can use `insert into table_name file 'fileName'` to import the data from the specified file into the specified table. For example, `insert into d0 file '/root/d0.csv';` imports the data in the file "/root/d0.csv" into the table "d0". If only a file name without a path is given, the file is expected to be in the current working directory of the TDengine CLI.
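A combined sketch with hypothetical table and file names, following the forms above:
```sql
-- Export query results to a CSV file in the CLI's current working directory
SELECT * FROM d0 >> d0_export.csv;
-- Import the data back from the CSV file into the table
INSERT INTO d0 FILE 'd0_export.csv';
```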
## TDengine CLI tips
- You can use the up and down keys to iterate the history of commands entered
...@@ -89,3 +97,5 @@ taos -h h1.taos.com -s "use db; show tables;"
- Execute `RESET QUERY CACHE` to clear the local cache of the table schema
- Execute SQL statements in batches. You can store a series of shell commands (ending with ;, one line for each SQL command) in a script file and execute the command `source <file-name>` in the TDengine CLI to execute all SQL commands in that file automatically
- Enter `q` to exit TDengine CLI - Enter `q` to exit TDengine CLI
...@@ -7,10 +7,10 @@ description: This document describes the supported platforms for the TDengine se
| | **Windows Server 2016/2019** | **Windows 10/11** | **CentOS 7.9/8** | **Ubuntu 18 or later** | **macOS** |
| ------------ | ---------------------------- | ----------------- | ---------------- | ---------------- | --------- |
| X64 | ●/E | ●/E | ● | ● | ● |
| ARM64 | | | ● | | ● |
Note: 1) ● means officially tested and verified, ○ means unofficially tested and verified, E means only supported by the enterprise edition. 2) The community edition only supports newer versions of mainstream operating systems, including Ubuntu 18+/CentOS 7+/RedHat/Debian/CoreOS/FreeBSD/OpenSUSE/SUSE Linux/Fedora/macOS, etc. If you have requirements for other operating systems and editions, please contact support of the enterprise edition.
## List of supported platforms for TDengine clients and connectors
...
label: TDengine Docker images
...@@ -670,6 +670,15 @@ The charset that takes effect is UTF-8.
| Value Range | 0: not consistent; 1: consistent. |
| Default | 0 |
### smlTsDefaultName
| Attribute | Description |
| -------- | -------------------------------------------------------- |
| Applicable | Client only |
| Meaning | The name of the time column for schemaless automatic table creation is set through this configuration |
| Type | String |
| Default Value | _ts |
## Compress Parameters
### compressMsgSize
...
...@@ -32,9 +32,31 @@ All data in tag_set is automatically converted to the NCHAR data type and does n
In the schemaless writing data line protocol, each data item in the field_set needs to be described with its data type. Let's explain in detail:
- If there are English double quotes on both sides, it indicates the VARCHAR type. For example, `"abc"`.
- If there are double quotes on both sides and an L/l prefix, it means NCHAR type. For example, `L"error message"`.
- If there are double quotes on both sides and a G/g prefix, it means GEOMETRY type. For example, `G"Point(4.343 89.342)"`.
- If there are double quotes on both sides and a B/b prefix, it means VARBINARY type. Inside the double quotes you can use hexadecimal values starting with \x, or a plain string. For example, `B"\x98f46e"` and `B"hello"`.
- Spaces, the equals sign (=), comma (,), double quote ("), and backslash (\\) need to be escaped with a preceding backslash (\\). (All refer to the ASCII characters.) The rules are as follows:
| **Serial number** | **Element** | **Escape characters** |
| -------- | ----------- | ----------------------------- |
| 1 | Measurement | Comma, Space |
| 2 | Tag key | Comma, Equals Sign, Space |
| 3 | Tag value | Comma, Equals Sign, Space |
| 4 | Field key | Comma, Equals Sign, Space |
| 5 | Field value | Double quote, Backslash |
With two contiguous backslashes, the first is interpreted as an escape character. Examples of backslash escape rules are as follows:
| **Serial number** | **Backslashes** | **Interpreted as** |
| -------- | ----------- | ----------------------------- |
| 1 | \ | \ |
| 2 | \\\\ | \ |
| 3 | \\\\\\ | \\\\ |
| 4 | \\\\\\\\ | \\\\ |
| 5 | \\\\\\\\\\ | \\\\\\ |
| 6 | \\\\\\\\\\\\ | \\\\\\ |
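As an illustration (a hypothetical line, not from the original), a record whose tag value escapes a space and whose field value escapes double quotes:
```text
st,t1=hello\ world c1="say \"hello\"" 1626006833639000000
```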
- Numeric types are distinguished by their suffixes.
| **Serial number** | **Postfix** | **Mapping type** | **Size (bytes)** |
...@@ -88,7 +110,9 @@ You can configure smlChildTableName in taos.cfg to specify table names, for exam
8. It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false. Otherwise, data will be written out of order and a database error will occur.
Note: TDengine 3.0.3.0 and later automatically detect whether order is consistent. This parameter is no longer used.
9. Because SQL table names do not support the period (.), schemaless writing handles it as well: any period in a table name automatically created by schemaless writing is replaced with an underscore (\_). If you manually specify a subtable name that contains a period, it is also converted to an underscore (\_).
10. The configuration item smlTsDefaultName (a string value) has been added to taos.cfg; it only works on the client side. It sets the name of the timestamp column for schemaless automatic table creation. If not configured, the name defaults to _ts.
11. Supertable and child table names are case sensitive.
:::tip
All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures. For example, the total length of each row of data cannot exceed 48 KB (64 KB since version 3.0.5.0), and the total length of a tag value cannot exceed 16 KB. See [TDengine SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
:::
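As a concrete illustration of item 10 above, the client-side setting is a single line in taos.cfg; the column name chosen here is illustrative:

```text
# In the client's taos.cfg: have schemaless auto-created tables name
# their timestamp column "ts" instead of the default "_ts".
smlTsDefaultName ts
```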
......
...@@ -74,7 +74,7 @@ grafana-cli plugins install tdengine-datasource
sudo -u grafana grafana-cli plugins install tdengine-datasource
```
You can also download zip files from [GitHub](https://github.com/taosdata/grafanaplugin/releases/latest) or [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) and install manually. The commands are as follows:
```bash
GF_VERSION=3.3.1
...@@ -218,11 +218,11 @@ The example to query the average system memory usage for the specified interval
### Importing the Dashboard
You can install the TDinsight dashboard from the data source configuration page (for example, `http://localhost:3000/datasources/edit/1/dashboards`) as a monitoring visualization tool for a TDengine cluster. Ensure that you use TDinsight for 3.x, which requires taosKeeper to be configured and running correctly. Check the [TDinsight User Manual](/reference/tdinsight/) for details.
![TDengine Database Grafana plugin import dashboard](./import_dashboard.webp)
A dashboard for TDengine 2.x has been published on Grafana: [Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167).
For more dashboards using the TDengine data source, [search here in Grafana](https://grafana.com/grafana/dashboards/?dataSource=tdengine-datasource). Here is a partial list:
......
---
sidebar_label: qStudio
title: qStudio
description: Step-by-Step Guide to Accessing TDengine Data with qStudio
---
qStudio is a free cross-platform SQL data analysis tool that allows easy browsing of tables, variables, functions, and configuration settings in a database. The latest version of qStudio includes built-in support for TDengine.
## Prerequisites
To connect TDengine using qStudio, you need to complete the following preparations:
- Install qStudio: qStudio supports major operating systems, including Windows, macOS, and Linux. Please ensure you download the correct installation package for your platform from the [download page](https://www.timestored.com/qstudio/download/).
- Set up a TDengine instance: make sure TDengine is installed and running correctly, and that taosAdapter is installed and running. For detailed information, refer to the taosAdapter User Manual.
## Connecting to TDengine with qStudio
1. Launch the qStudio application and select "Server" and then "Add Server..." from the menu. Choose TDengine from the Server Type dropdown.
![Connecting TDengine with qStudio](./qstudio/qstudio-connect-tdengine.webp)
2. Configure the TDengine connection by entering the host address, port number, username, and password. If TDengine is deployed on the local machine, you only need to fill in the username and password. The default username is "root" and the default password is "taosdata". Click "Test" to check the connection's availability. If the TDengine Java connector is not installed on the local machine, qStudio will prompt you to download and install it.
![Download Java Connector](./qstudio/qstudio-jdbc-connector-download.webp)
3. Once connected successfully, the screen will display as shown below. If the connection fails, check that the TDengine service and taosAdapter are running correctly, and ensure that the host address, port number, username, and password are correct.
![Successful Connection](./qstudio/qstudio-connect-tdengine-test.webp)
4. Use qStudio to select databases and tables to browse data from the TDengine server.
![Browsing TDengine Data with qStudio](./qstudio/qstudio-browse-data.webp)
5. You can also perform operations on TDengine data by executing SQL commands.
![qStudio SQL Commands](./qstudio/qstudio-sql-execution.webp)
6. qStudio supports charting functions based on the data. For more information, please refer to the [qStudio documentation](https://www.timestored.com/qstudio/help).
![qStudio Chart](./qstudio/qstudio-chart.webp)
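For example, assuming a `test` database such as the one created by the TDengine demo data (the database and table names here are illustrative), queries like the following can be executed from qStudio's SQL editor:

```sql
-- Illustrative queries; adjust database and table names to your data.
SHOW DATABASES;
SELECT COUNT(*) FROM test.meters;
SELECT AVG(current), MAX(voltage) FROM test.meters INTERVAL(10s);
```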
...@@ -338,7 +338,7 @@ Remark:
Equivalent function: sum
```sql
Select sum(value) from (select first(val) value from table_name interval(10s) fill(linear)) interval(10s)
```
Note: This function has no interpolation requirements, so it can be directly calculated.
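Since no fill(linear) interpolation is actually needed for a plain sum, the nested form above can presumably be collapsed to a direct aggregate along these lines:

```sql
-- Direct form; assumes the same table_name and val column as above.
SELECT sum(val) FROM table_name INTERVAL(10s);
```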
......
...@@ -10,6 +10,27 @@ For TDengine 2.x installation packages by version, please visit [here](https://t
import Release from "/components/ReleaseV3";
## 3.1.1.0
<Release type="tdengine" version="3.1.1.0" />
## 3.1.0.3
<Release type="tdengine" version="3.1.0.3" />
## 3.1.0.2
<Release type="tdengine" version="3.1.0.2" />
## 3.1.0.0
:::note IMPORTANT
- Once you upgrade to TDengine 3.1.0.0, you cannot roll back to any previous version of TDengine. Upgrading to 3.1.0.0 will alter your data such that it cannot be read by previous versions.
- You must remove all streams before upgrading to TDengine 3.1.0.0. If you upgrade a deployment that contains streams, the upgrade will fail and your deployment will become nonoperational.
:::
<Release type="tdengine" version="3.1.0.0" />
## 3.0.7.1
<Release type="tdengine" version="3.0.7.1" />
......
...@@ -8,9 +8,13 @@ library("rJava")
library("RJDBC")
args <- commandArgs(trailingOnly = TRUE)
driver_path = args[1] # path to jdbc-driver for example: "/root/taos-jdbcdriver-3.2.4-dist.jar"
driver = JDBC("com.taosdata.jdbc.TSDBDriver", driver_path)
conn = dbConnect(driver, "jdbc:TAOS://127.0.0.1:6030/?user=root&password=taosdata")
dbGetQuery(conn, "SELECT server_version()")
dbSendUpdate(conn, "create database if not exists rtest")
dbSendUpdate(conn, "create table if not exists rtest.test (ts timestamp, current float, voltage int, devname varchar(20))")
dbSendUpdate(conn, "insert into rtest.test values (now, 1.2, 220, 'test')")
dbGetQuery(conn, "select * from rtest.test")
dbDisconnect(conn)
# ANCHOR_END: demo
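The R demo scripts read the JDBC driver path from their first command-line argument, so a run might look like this (the script filename and jar path are illustrative):

```bash
# Script name and driver path are illustrative; adjust to your setup.
Rscript rjdbc-demo.r /root/taos-jdbcdriver-3.2.4-dist.jar
```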
...@@ -2,11 +2,19 @@ if (! "RJDBC" %in% installed.packages()[, "Package"]) { ...@@ -2,11 +2,19 @@ if (! "RJDBC" %in% installed.packages()[, "Package"]) {
install.packages('RJDBC', repos='http://cran.us.r-project.org') install.packages('RJDBC', repos='http://cran.us.r-project.org')
} }
# ANCHOR: demo
library("DBI") library("DBI")
library("rJava") library("rJava")
library("RJDBC") library("RJDBC")
driver_path = "/home/debug/build/lib/taos-jdbcdriver-2.0.38-dist.jar"
args<- commandArgs(trailingOnly = TRUE)
driver_path = args[1] # path to jdbc-driver for example: "/root/taos-jdbcdriver-3.2.4-dist.jar"
driver = JDBC("com.taosdata.jdbc.rs.RestfulDriver", driver_path) driver = JDBC("com.taosdata.jdbc.rs.RestfulDriver", driver_path)
conn = dbConnect(driver, "jdbc:TAOS-RS://localhost:6041?user=root&password=taosdata") conn = dbConnect(driver, "jdbc:TAOS-RS://localhost:6041?user=root&password=taosdata")
dbGetQuery(conn, "SELECT server_version()") dbGetQuery(conn, "SELECT server_version()")
dbDisconnect(conn) dbSendUpdate(conn, "create database if not exists rtest")
\ No newline at end of file dbSendUpdate(conn, "create table if not exists rtest.test (ts timestamp, current float, voltage int, devname varchar(20))")
dbSendUpdate(conn, "insert into rtest.test values (now, 1.2, 220, 'test')")
dbGetQuery(conn, "select * from rtest.test")
dbDisconnect(conn)
# ANCHOR_END: demo
apt install -y libbz2-dev libpcre2-dev libicu-dev
...@@ -78,6 +78,7 @@ int printRow(char *str, TAOS_ROW row, TAOS_FIELD *fields, int numFields) {
} break;
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_VARBINARY:
case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_GEOMETRY: {
int32_t charLen = varDataLen((char *)row[i] - VARSTR_HEADER_SIZE);
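Since VARBINARY cells can hold arbitrary bytes, copying them into the output string verbatim (as the BINARY/NCHAR branch does) may produce unprintable characters. A minimal, self-contained sketch of rendering a binary cell as hex, assuming nothing from taos.h, might look like this:

```c
#include <stdio.h>

// Render `len` bytes starting at `data` as a hex string into `out`.
// `out` must have room for 2*len+1 bytes. Hypothetical helper, not part
// of the TDengine API.
static void toHex(const unsigned char *data, int len, char *out) {
  for (int i = 0; i < len; i++) {
    sprintf(out + i * 2, "%02x", data[i]);
  }
  out[len * 2] = '\0';
}

int main(void) {
  const unsigned char cell[] = {0x98, 0xf4, 0x6e};  // e.g. the bytes of B"\x98f46e"
  char buf[sizeof(cell) * 2 + 1];
  toHex(cell, (int)sizeof(cell), buf);
  printf("varbinary cell: \\x%s\n", buf);  // prints: varbinary cell: \x98f46e
  return 0;
}
```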
......
...@@ -51,7 +51,7 @@ void insertData(TAOS *taos) {
int code = taos_stmt_prepare(stmt, sql, 0);
checkErrorCode(stmt, code, "failed to execute taos_stmt_prepare");
// bind table name and tags
TAOS_MULTI_BIND tags[2];
char *location = "California.SanFrancisco";
int groupId = 2;
tags[0].buffer_type = TSDB_DATA_TYPE_BINARY;
...@@ -144,4 +144,4 @@ int main() {
}
// output:
// successfully inserted 2 rows
...@@ -76,6 +76,7 @@ int printRow(char *str, TAOS_ROW row, TAOS_FIELD *fields, int numFields) {
} break;
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_VARBINARY:
case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_GEOMETRY: {
int32_t charLen = varDataLen((char *)row[i] - VARSTR_HEADER_SIZE);
...@@ -58,7 +58,7 @@ void insertData(TAOS *taos) {
int code = taos_stmt_prepare(stmt, sql, 0);
checkErrorCode(stmt, code, "failed to execute taos_stmt_prepare");
// bind table name and tags
TAOS_MULTI_BIND tags[2];
char* location = "California.SanFrancisco";
int groupId = 2;
tags[0].buffer_type = TSDB_DATA_TYPE_BINARY;
...@@ -82,7 +82,7 @@ void insertData(TAOS *taos) {
{1648432611749, 12.6, 218, 0.33},
};
TAOS_MULTI_BIND values[4];
values[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
values[0].buffer_length = sizeof(int64_t);
values[0].length = &values[0].buffer_length;
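The practical difference in this migration is that TAOS_MULTI_BIND is array-oriented: `length` and `is_null` point to per-row arrays, and a `num` field carries the row count. The struct layout below is an assumption based on taos.h and is re-declared only so the sketch compiles stand-alone; in real code, include taos.h and use TAOS_MULTI_BIND itself.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

// Assumed layout of TAOS_MULTI_BIND from taos.h, repeated here only so
// this sketch builds without the TDengine headers.
typedef struct {
  int       buffer_type;
  void     *buffer;
  uintptr_t buffer_length;
  int32_t  *length;   // per-row lengths
  char     *is_null;  // per-row null flags
  int       num;      // row count: the field TAOS_BIND lacks
} MULTI_BIND_SKETCH;

#define TSDB_DATA_TYPE_BINARY 8  // value assumed from taos.h

int main(void) {
  char   *location = "California.SanFrancisco";
  int32_t len = (int32_t)strlen(location);
  char    isNull = 0;

  MULTI_BIND_SKETCH tag = {0};
  tag.buffer_type   = TSDB_DATA_TYPE_BINARY;
  tag.buffer        = location;
  tag.buffer_length = (uintptr_t)len;  // max element size
  tag.length        = &len;            // one row, so a single-element array
  tag.is_null       = &isNull;
  tag.num           = 1;               // single row
  printf("bound %d row(s), first length %d\n", tag.num, (int)*tag.length);
  return 0;
}
```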
...@@ -138,4 +138,4 @@ int main() {
// output:
// successfully inserted 2 rows
...@@ -227,12 +227,6 @@ tmq_t* build_consumer() {
return NULL;
}
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
tmq_t* tmq = tmq_consumer_new(conf, NULL, 0);
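With the `experimental.snapshot.enable` block removed, the remaining configuration path might reduce to a sketch like the following. It reuses only the calls visible in the surrounding example plus `tmq_conf_new`; the `group.id` value is illustrative, and `tmq_commit_cb_print` is that example's callback. This requires taos.h and libtaos; it is a sketch, not the example's actual code.

```c
// Sketch of build_consumer() after the removal; error handling abbreviated.
tmq_t* build_consumer() {
  tmq_conf_t* conf = tmq_conf_new();
  if (conf == NULL) return NULL;

  // group.id is mandatory for TMQ consumers; the value here is made up.
  if (TMQ_CONF_OK != tmq_conf_set(conf, "group.id", "example_group")) {
    tmq_conf_destroy(conf);
    return NULL;
  }

  tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
  tmq_t* tmq = tmq_consumer_new(conf, NULL, 0);
  tmq_conf_destroy(conf);
  return tmq;
}
```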
......
...@@ -35,7 +35,6 @@ func main() {
"td.connect.port": "6030",
"client.id": "test_tmq_client",
"enable.auto.commit": "false",
"msg.with.table.name": "true",
})
if err != nil {
......
...@@ -22,7 +22,7 @@
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>3.2.4</version>
</dependency>
<!-- ANCHOR_END: dep-->
<dependency>
...@@ -33,4 +33,4 @@
</dependency>
</dependencies>
</project>
...@@ -10,7 +10,7 @@ TDengine takes full advantage of the characteristics of time-series data, proposing the "one data collection...
If you are a developer, be sure to read the [Developer Guide](./develop) chapter carefully. It covers database connections, data modeling, data ingestion, queries, stream processing, caching, data subscription, user-defined functions, and more, with sample code in multiple programming languages. In most cases, you can simply copy and paste the sample code, adapt it slightly for your application, and it will run.
We already live in the era of big data, and scaling up can no longer keep pace with ever-growing business demands. Any system must be able to scale out, which makes clustering an indispensable feature of big data and database systems. The TDengine team has not only implemented cluster functionality but also open-sourced this important core capability. For how to deploy, manage, and maintain a TDengine cluster, please refer to the [Deploying a Cluster] chapter.
TDengine uses SQL as its query language, which greatly reduces learning and migration costs, while adding extensions for time-series scenarios such as interpolation, downsampling, and time-weighted averaging. The [SQL Manual](./taos-sql) chapter describes the SQL syntax in detail and lists all supported commands and functions.
...@@ -18,8 +18,6 @@ TDengine uses SQL as its query language, which greatly reduces learning and migration...
If you want a more detailed understanding of TDengine's peripheral tools, REST API, and connectors for various programming languages, see the [Reference Guide](./reference) chapter.
Finally, as an open-source project, we welcome everyone's participation. If you find any errors or unclear descriptions in the documentation, click "Edit this document" at the bottom of each page to make changes directly.
Together, we make a difference!
...@@ -6,7 +6,14 @@ toc_max_heading_level: 2
TDengine is an open-source, high-performance, cloud-native [time-series database](https://tdengine.com/tsdb/) optimized for IoT, connected vehicles, industrial IoT, finance, and IT operations scenarios. The TDengine code, including its cluster functionality, is open source under the GNU AGPL v3.0. In addition to the core time-series database functionality, TDengine also provides [caching](../develop/cache/), [data subscription](../develop/tmq), [stream processing](../develop/stream), and other features to reduce system complexity as well as development and operations costs.
This chapter introduces TDengine's main products and features, competitive advantages, application scenarios, and benchmark comparisons with other databases, to give you an overall picture of TDengine.
## Main Products
TDengine has three main products: TDengine Pro (TDengine Enterprise), TDengine Cloud, and TDengine OSS. For their definitions, see:
- [TDengine Enterprise](https://www.taosdata.com/tdengine-pro)
- [TDengine Cloud](https://cloud.taosdata.com/?utm_source=menu&utm_medium=webcn)
- [TDengine OSS](https://www.taosdata.com/tdengine-oss)
## Main Features
......
...@@ -28,6 +28,21 @@ docker run -d -p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 -p 6043-6049:6043
Note: The TDengine 3.0 server uses only TCP port 6030. Port 6041 is the REST service port provided by taosAdapter. Ports 6043-6049 are used by taosAdapter for third-party application access and can be opened as needed.
To persist data to a specific folder on the host machine, run the following command:
```shell
docker run -d -v ~/data/taos/dnode/data:/var/lib/taos \
-v ~/data/taos/dnode/log:/var/log/taos \
-p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 -p 6043-6049:6043-6049/udp tdengine/tdengine
```
:::note
- /var/lib/taos: the default TDengine data directory. Its location can be changed via the [configuration file]. You can replace ~/data/taos/dnode/data with your own data directory.
- /var/log/taos: the default TDengine log directory. Its location can be changed via the [configuration file]. You can replace ~/data/taos/dnode/log with your own log directory.
:::
Verify that the container has started and is running normally.
```shell
...@@ -108,4 +123,4 @@ SELECT FIRST(ts), AVG(current), MAX(voltage), MIN(phase) FROM test.d10 INTERVAL(
## Others
For more details on using TDengine in a Docker environment, see [Deploying TDengine with Docker](../../deployment/docker)
...@@ -82,7 +82,7 @@ TDengine provides a rich set of application development interfaces. To make it easy for users to quickly...
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>3.2.4</version>
</dependency>
```
......