Commit 7d159585 authored by wmmhello

Merge branch 'main' of https://github.com/taosdata/TDengine into mark/tmq

@@ -16,7 +16,6 @@ debug/
release/
target/
debs/
-deps/
rpms/
mac/
*.pyc
......
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v2.3.0
    hooks:
      - id: check-yaml
      - id: check-json
      - id: end-of-file-fixer
      - id: trailing-whitespace
  - repo: https://github.com/psf/black
    rev: stable
    hooks:
      - id: black
  - repo: https://github.com/pocc/pre-commit-hooks
    rev: master
    hooks:
      - id: cppcheck
        args: ["--error-exitcode=0"]
  - repo: https://github.com/crate-ci/typos
    rev: v1.15.7
    hooks:
      - id: typos
@@ -15,11 +15,15 @@ SET(TD_COMMUNITY_DIR ${PROJECT_SOURCE_DIR})
set(TD_SUPPORT_DIR "${TD_SOURCE_DIR}/cmake")
set(TD_CONTRIB_DIR "${TD_SOURCE_DIR}/contrib")

include(${TD_SUPPORT_DIR}/cmake.platform)
include(${TD_SUPPORT_DIR}/cmake.define)
include(${TD_SUPPORT_DIR}/cmake.options)
include(${TD_SUPPORT_DIR}/cmake.version)

# contrib
add_subdirectory(contrib)
......
@@ -68,14 +68,14 @@ sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-d
```bash
sudo yum install epel-release
sudo yum update
-sudo yum install -y gcc gcc-c++ make cmake3 git openssl-devel
+sudo yum install -y gcc gcc-c++ make cmake3 gflags git openssl-devel
sudo ln -sf /usr/bin/cmake3 /usr/bin/cmake
```

### CentOS 8/Fedora/Rocky Linux

```bash
-sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel
+sudo dnf install -y gcc gcc-c++ gflags make cmake epel-release git openssl-devel
```

#### Install build dependencies for taosTools on CentOS

@@ -117,7 +117,7 @@ scl enable devtoolset-9 -- bash
### macOS

```
-brew install argp-standalone pkgconfig
+brew install argp-standalone gflags pkgconfig
```

### Setup golang environment
......
@@ -76,14 +76,14 @@ sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-d
```bash
sudo yum install epel-release
sudo yum update
-sudo yum install -y gcc gcc-c++ make cmake3 git openssl-devel
+sudo yum install -y gcc gcc-c++ make cmake3 gflags git openssl-devel
sudo ln -sf /usr/bin/cmake3 /usr/bin/cmake
```

### CentOS 8/Fedora/Rocky Linux

```bash
-sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel
+sudo dnf install -y gcc gcc-c++ make cmake epel-release gflags git openssl-devel
```

#### Install build dependencies for taosTools on CentOS

@@ -124,7 +124,7 @@ scl enable devtoolset-9 -- bash
### macOS

```
-brew install argp-standalone pkgconfig
+brew install argp-standalone gflags pkgconfig
```

### Setup golang environment
......
@@ -189,3 +189,9 @@ option(
    "If build release version"
    OFF
)

option(
    BUILD_CONTRIB
    "If build thirdpart from source"
    OFF
)
@@ -121,6 +121,12 @@ IF ("${CPUTYPE}" STREQUAL "")
        SET(TD_LOONGARCH_64 TRUE)
        ADD_DEFINITIONS("-D_TD_LOONGARCH_")
        ADD_DEFINITIONS("-D_TD_LOONGARCH_64")
    ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "mips64")
        SET(PLATFORM_ARCH_STR "mips")
        MESSAGE(STATUS "input cpuType: mips64")
        SET(TD_MIPS_64 TRUE)
        ADD_DEFINITIONS("-D_TD_MIPS_")
        ADD_DEFINITIONS("-D_TD_MIPS_64")
    ENDIF ()
ELSE ()
    # if generate ARM version:
@@ -172,5 +178,17 @@ ENDIF()
MESSAGE(STATUS "Platform arch:" ${PLATFORM_ARCH_STR})

set(TD_DEPS_DIR "x86")
if (TD_LINUX)
    IF (TD_ARM_64 OR TD_ARM_32)
        set(TD_DEPS_DIR "arm")
    ELSEIF (TD_MIPS_64)
        set(TD_DEPS_DIR "mips")
    ELSE()
        set(TD_DEPS_DIR "x86")
    ENDIF()
endif()
MESSAGE(STATUS "DEPS_DIR" ${TD_DEPS_DIR})

MESSAGE("C Compiler: ${CMAKE_C_COMPILER} (${CMAKE_C_COMPILER_ID}, ${CMAKE_C_COMPILER_VERSION})")
MESSAGE("CXX Compiler: ${CMAKE_CXX_COMPILER} (${CMAKE_C_COMPILER_ID}, ${CMAKE_CXX_COMPILER_VERSION})")
@@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
    SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
-    SET(TD_VER_NUMBER "3.0.5.0")
+    SET(TD_VER_NUMBER "3.0.5.2.alpha")
ENDIF ()

IF (DEFINED VERCOMPATIBLE)
......
# rocksdb
-#set(librocksdb_src ${TD_CONTRIB_DIR}/rocksdb)
-#set(librocksdb_binary ${TD_CONTRIB_DIR}/rocksdb/build)
-ExternalProject_Add(rocksdb
+if (${BUILD_CONTRIB})
+    ExternalProject_Add(rocksdb
+        URL https://github.com/facebook/rocksdb/archive/refs/tags/v8.1.1.tar.gz
+        URL_HASH MD5=3b4c97ee45df9c8a5517308d31ab008b
+        DOWNLOAD_NO_PROGRESS 1
+        DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
+        SOURCE_DIR "${TD_CONTRIB_DIR}/rocksdb"
+        CONFIGURE_COMMAND ""
+        BUILD_COMMAND ""
+        INSTALL_COMMAND ""
+        TEST_COMMAND ""
+    )
+else()
+    if (NOT ${TD_LINUX})
+        ExternalProject_Add(rocksdb
        URL https://github.com/facebook/rocksdb/archive/refs/tags/v8.1.1.tar.gz
        URL_HASH MD5=3b4c97ee45df9c8a5517308d31ab008b
        DOWNLOAD_NO_PROGRESS 1
@@ -14,3 +25,5 @@ ExternalProject_Add(rocksdb
        INSTALL_COMMAND ""
        TEST_COMMAND ""
)
+    endif()
+endif()
@@ -2,7 +2,7 @@

# taosadapter
ExternalProject_Add(taosadapter
        GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
-        GIT_TAG 3.0
+        GIT_TAG main
        SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
        BINARY_DIR ""
        #BUILD_IN_SOURCE TRUE
......
@@ -77,11 +77,25 @@ if(${BUILD_WITH_LEVELDB})
    cat("${TD_SUPPORT_DIR}/leveldb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif(${BUILD_WITH_LEVELDB})

# rocksdb
-if(${BUILD_WITH_ROCKSDB})
+if (${BUILD_CONTRIB})
+  if(${BUILD_WITH_ROCKSDB})
    cat("${TD_SUPPORT_DIR}/rocksdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
    add_definitions(-DUSE_ROCKSDB)
-endif(${BUILD_WITH_ROCKSDB})
+  endif()
+else()
+  if (NOT ${TD_LINUX})
+    if(${BUILD_WITH_ROCKSDB})
+      cat("${TD_SUPPORT_DIR}/rocksdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
+      add_definitions(-DUSE_ROCKSDB)
+    endif(${BUILD_WITH_ROCKSDB})
+  else()
+    if(${BUILD_WITH_ROCKSDB})
+      add_definitions(-DUSE_ROCKSDB)
+    endif(${BUILD_WITH_ROCKSDB})
+  endif()
+endif()
# canonical-raft
if(${BUILD_WITH_CRAFT})
@@ -237,10 +251,11 @@ if (${BUILD_WITH_UV})
    endif(${TD_LINUX})
endif (${BUILD_WITH_UV})

-if(${BUILD_WITH_ROCKSDB})
+if (${BUILD_WITH_ROCKSDB})
+  if (${BUILD_CONTRIB})
    if(${TD_LINUX})
        SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL} -Wno-error=maybe-uninitialized -Wno-error=unused-but-set-variable -Wno-error=unused-variable -Wno-error=unused-function -Wno-errno=unused-private-field -Wno-error=unused-result")
-        IF ("${CMAKE_BUILD_TYPE}" STREQUAL "")
+        if ("${CMAKE_BUILD_TYPE}" STREQUAL "")
            SET(CMAKE_BUILD_TYPE Release)
        endif()
    endif(${TD_LINUX})
@@ -256,6 +271,9 @@ if(${BUILD_WITH_ROCKSDB})
    if (${TD_WINDOWS})
        SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4244 /wd4819")
+        option(WITH_JNI "" OFF)
+        option(WITH_MD_LIBRARY "build with MD" OFF)
+        set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib)
    endif(${TD_WINDOWS})
@@ -265,16 +283,49 @@ if(${BUILD_WITH_ROCKSDB})
        option(WITH_PERF_CONTEXT "" OFF)
    endif(${TD_DARWIN})

-    if(${TD_WINDOWS})
-        option(WITH_JNI "" OFF)
-    endif(${TD_WINDOWS})
-
-    if(${TD_WINDOWS})
+    option(WITH_FALLOCATE "" OFF)
+    option(WITH_JEMALLOC "" OFF)
+    option(WITH_GFLAGS "" OFF)
+    option(PORTABLE "" ON)
+    option(WITH_LIBURING "" OFF)
+    option(FAIL_ON_WARNINGS OFF)
+    option(WITH_TESTS "" OFF)
+    option(WITH_BENCHMARK_TOOLS "" OFF)
+    option(WITH_TOOLS "" OFF)
+    option(WITH_LIBURING "" OFF)
+    option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF)
+    add_subdirectory(rocksdb EXCLUDE_FROM_ALL)
+    target_include_directories(
+        rocksdb
+        PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/rocksdb/include>
+    )
+  else()
+    if (NOT ${TD_LINUX})
+      MESSAGE(STATUS "CXXXX STATUS CONFIG: " ${CMAKE_CXX_FLAGS})
+      if(${TD_DARWIN})
+        SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized")
+      endif(${TD_DARWIN})
+      if (${TD_DARWIN_ARM64})
+        set(HAS_ARMV8_CRC true)
+      endif(${TD_DARWIN_ARM64})
+      if (${TD_WINDOWS})
+        SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4244 /wd4819")
+        option(WITH_JNI "" OFF)
        option(WITH_MD_LIBRARY "build with MD" OFF)
        set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib)
      endif(${TD_WINDOWS})
+      if(${TD_DARWIN})
+        option(HAVE_THREAD_LOCAL "" OFF)
+        option(WITH_IOSTATS_CONTEXT "" OFF)
+        option(WITH_PERF_CONTEXT "" OFF)
+      endif(${TD_DARWIN})
      option(WITH_FALLOCATE "" OFF)
      option(WITH_JEMALLOC "" OFF)
      option(WITH_GFLAGS "" OFF)
@@ -286,17 +337,17 @@ if(${BUILD_WITH_ROCKSDB})
      option(WITH_BENCHMARK_TOOLS "" OFF)
      option(WITH_TOOLS "" OFF)
      option(WITH_LIBURING "" OFF)
+      IF (TD_LINUX)
+        option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF)
+      ELSE()
        option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF)
+      ENDIF()
      add_subdirectory(rocksdb EXCLUDE_FROM_ALL)
      target_include_directories(
        rocksdb
        PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/rocksdb/include>
      )
-endif(${BUILD_WITH_ROCKSDB})
+    endif()
+  endif()
+endif()
# lucene
# To support build on ubuntu: sudo apt-get install libboost-all-dev
@@ -507,6 +558,7 @@ if(${BUILD_GEOS})
    endif(${TD_LINUX})
    option(BUILD_SHARED_LIBS "Build GEOS with shared libraries" OFF)
    add_subdirectory(geos EXCLUDE_FROM_ALL)
+    unset(CMAKE_CXX_STANDARD CACHE) # undo libgeos's setting of global CMAKE_CXX_STANDARD
    target_include_directories(
        geos_c
        PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/geos/include>
......
(3 file diffs collapsed.)
@@ -20,6 +20,19 @@ The standard server installation package includes `taos`, `taosd`, `taosAdapter`
The TDengine Community Edition is released as Deb and RPM packages. The Deb package can be installed on Debian, Ubuntu, and derivative systems. The RPM package can be installed on CentOS, RHEL, SUSE, and derivative systems. A .tar.gz package is also provided for enterprise customers, and you can install TDengine via `apt-get` as well. The .tar.gz package includes `taosdump` and the TDinsight installation script. If you want to use these utilities with the Deb or RPM package, download and install taosTools separately. TDengine can also be installed on x64 Windows and x64/m1 macOS.
## Operating environment requirements

On Linux, the minimum requirements for the operating environment are as follows:

- Linux kernel version: 3.10.0-1160.83.1.el7.x86_64
- glibc version: 2.17

If you compile and install from cloned source code, the following are also required (see the version-check sketch below):

- CMake version: 3.26.4 or above
- GCC version: 9.3.1 or above
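As a quick sanity check before compiling, something like the following illustrative Python sketch (my example, not part of TDengine) prints the local toolchain versions for comparison against the minimums above:

```python
# Illustrative helper (not part of TDengine): print local versions to
# compare against the minimum build requirements listed above.
import platform
import subprocess

def first_line(*cmd):
    # e.g. ("cmake", "--version") -> "cmake version 3.26.4"
    try:
        out = subprocess.run(cmd, capture_output=True, text=True).stdout
    except FileNotFoundError:
        return "not found"
    return out.splitlines()[0] if out else "not found"

print("kernel:", platform.release())                 # minimum 3.10.0-1160.83.1.el7.x86_64
print("glibc :", platform.libc_ver()[1])             # minimum 2.17
print("cmake :", first_line("cmake", "--version"))   # minimum 3.26.4
print("gcc   :", first_line("gcc", "--version"))     # minimum 9.3.1
```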
## Installation

<Tabs>
......
@@ -33,7 +33,7 @@ The below SQL statement is used to insert one row into table "d1001".
INSERT INTO d1001 VALUES (ts1, 10.3, 219, 0.31);
```

-`ts1` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detial, refer to [TDengine SQL insert timestamp section](/taos-sql/insert).
+`ts1` is a Unix timestamp; only timestamps later than the current time minus the configured KEEP duration are allowed. For further detail, refer to the [TDengine SQL insert timestamp section](/taos-sql/insert).

### Insert Multiple Rows

@@ -43,7 +43,7 @@ Multiple rows can be inserted in a single SQL statement. The example below inser
INSERT INTO d1001 VALUES (ts1, 10.2, 220, 0.23) (ts2, 10.3, 218, 0.25);
```

-`ts1` and `ts2` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detial, refer to [TDengine SQL insert timestamp section](/taos-sql/insert).
+`ts1` and `ts2` are Unix timestamps; only timestamps later than the current time minus the configured KEEP duration are allowed. For further detail, refer to the [TDengine SQL insert timestamp section](/taos-sql/insert).

### Insert into Multiple Tables

@@ -53,7 +53,7 @@ Data can be inserted into multiple tables in the same SQL statement. The example
INSERT INTO d1001 VALUES (ts1, 10.3, 219, 0.31) (ts2, 12.6, 218, 0.33) d1002 VALUES (ts3, 12.3, 221, 0.31);
```

-`ts1`, `ts2` and `ts3` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detial, refer to [TDengine SQL insert timestamp section](/taos-sql/insert).
+`ts1`, `ts2`, and `ts3` are Unix timestamps; only timestamps later than the current time minus the configured KEEP duration are allowed. For further detail, refer to the [TDengine SQL insert timestamp section](/taos-sql/insert).

For more details about `INSERT` please refer to [INSERT](/taos-sql/insert).
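For reference, the same multi-table insert can be issued from the Python connector; this is a hypothetical sketch that assumes a local TDengine instance and that tables `d1001`/`d1002` already exist in a database named `power`:

```python
# Hypothetical sketch: multi-table INSERT via the Python connector.
# Assumes a local TDengine, default credentials, and existing tables
# d1001/d1002 in a database named "power" with the schema shown above.
import taos

conn = taos.connect()          # defaults to localhost:6030, root/taosdata
conn.execute("USE power")      # "power" is an assumed database name
affected = conn.execute(
    "INSERT INTO d1001 VALUES (NOW, 10.3, 219, 0.31) "
    "d1002 VALUES (NOW, 12.3, 221, 0.31)"
)
print(affected)                # number of rows actually written
conn.close()
```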
......
@@ -105,6 +105,12 @@ class Consumer:
    def poll(self, timeout: float = 1.0):
        pass

+    def assignment(self):
+        pass
+
+    def seek(self, partition):
+        pass
+
    def close(self):
        pass
......
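For context, the two methods added above might be used together like this; a hypothetical sketch that assumes a taospy TMQ `Consumer`, an existing topic, and that `assignment()` returns `TopicPartition` objects carrying an offset:

```python
# Hypothetical sketch: rewind every assigned partition of a subscribed
# consumer. "topic_meters" and the config values are assumptions.
from taos.tmq import Consumer

consumer = Consumer({
    "group.id": "g1",
    "td.connect.user": "root",
    "td.connect.pass": "taosdata",
})
consumer.subscribe(["topic_meters"])    # assumed existing topic
for tp in consumer.assignment():        # partitions currently assigned
    tp.offset = 0                       # rewind to the beginning
    consumer.seek(tp)                   # replay this partition from offset 0
consumer.close()
```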
@@ -10,10 +10,10 @@ TDengine uses various kinds of caching techniques to efficiently write and query
TDengine uses an insert-driven cache management policy, known as first in, first out (FIFO). This policy differs from read-driven "least recently used (LRU)" cache management. A FIFO policy stores the latest data in cache and flushes the oldest data from cache to disk when the cache usage reaches a threshold. In IoT use cases, the most recent data or the current state is most important. The cache policy in TDengine, like much of the design and architecture of TDengine, is based on the nature of IoT data.

-When you create a database, you can configure the size of the write cache on each vnode. The **vgroups** parameter determines the number of vgroups that process data in the database, and the **buffer** parameter determines the size of the write cache for each vnode.
+When you create a database, you can configure the size of the write cache on each vnode. The **vgroups** parameter determines the number of vgroups that process data in the database, and the **buffer** parameter determines the size of the write cache for each vnode. The unit of **buffer** is MB.

```sql
-create database db0 vgroups 100 buffer 16MB
+create database db0 vgroups 100 buffer 16
```

In theory, larger cache sizes are always better. However, at a certain point, it becomes impossible to improve performance by increasing cache size. In most scenarios, you can retain the default cache settings.

@@ -28,10 +28,10 @@ When you create a database, you can configure whether the latest data from every
## Metadata Cache

-To improve query and write performance, each vnode caches the metadata that it receives. When you create a database, you can configure the size of the metadata cache through the *pages* and *pagesize* parameters.
+To improve query and write performance, each vnode caches the metadata that it receives. When you create a database, you can configure the size of the metadata cache through the *pages* and *pagesize* parameters. The unit of *pagesize* is KB.

```sql
-create database db0 pages 128 pagesize 16kb
+create database db0 pages 128 pagesize 16
```

The preceding SQL statement creates 128 pages on each vnode in the `db0` database. Each page has a 16 KB metadata cache.
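To make the arithmetic explicit, the total metadata cache per vnode is simply pages × pagesize; a quick illustrative calculation:

```python
# metadata cache per vnode = pages * pagesize
pages, pagesize_kb = 128, 16
total_kb = pages * pagesize_kb
print(f"{total_kb} KB ({total_kb / 1024:.0f} MB) of metadata cache per vnode")  # 2048 KB (2 MB)
```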
......
@@ -17,7 +17,7 @@ When you create a user-defined function, you must implement standard interface f
- For aggregate functions, implement the `aggfn_start`, `aggfn`, and `aggfn_finish` interface functions.
- To initialize your function, implement the `udf_init` function. To terminate your function, implement the `udf_destroy` function.

-There are strict naming conventions for these interface functions. The names of the start, finish, init, and destroy interfaces must be <udf-name\>_start, <udf-name\>_finish, <udf-name\>_init, and <udf-name\>_destroy, respectively. Replace `scalarfn`, `aggfn`, and `udf` with the name of your user-defined function.
+There are strict naming conventions for these interface functions. The names of the start, finish, init, and destroy interfaces must be `<udf-name>_start`, `<udf-name>_finish`, `<udf-name>_init`, and `<udf-name>_destroy`, respectively. Replace `scalarfn`, `aggfn`, and `udf` with the name of your user-defined function.

### Implementing a Scalar Function in C
The implementation of a scalar function is described as follows:
@@ -318,7 +318,7 @@ The implementation of a scalar UDF is described as follows:
def process(input: datablock) -> tuple[output_type]:
```

-Description: this function prcesses datablock, which is the input; you can use datablock.data(row, col) to access the python object at location(row,col); the output is a tuple object consisted of objects of type outputtype
+Description: this function processes the input datablock; you can use datablock.data(row, col) to access the Python object at location (row, col); the output is a tuple object consisting of objects of the output type.

#### Aggregate UDF Interface

@@ -356,7 +356,7 @@ def process(input: datablock) -> tuple[output_type]:
    # return tuple object consisted of object of type outputtype
```

-Note:process() must be implemeted, init() and destroy() must be defined too but they can do nothing.
+Note: process() must be implemented; init() and destroy() must also be defined, but they can be no-ops.
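To make the scalar lifecycle concrete, here is a minimal sketch (my illustration, not from the TDengine docs) of a do-almost-nothing scalar UDF; it assumes the datablock argument also exposes a `shape()` method returning `(rows, cols)`:

```python
# Minimal scalar UDF sketch: echo the first input column unchanged.
# Assumption: block.shape() returns (rows, cols); block.data(row, col)
# returns the Python object at that position, as described above.

def init():
    pass            # nothing to set up

def destroy():
    pass            # nothing to tear down

def process(block):
    rows, _cols = block.shape()
    # return one output value per input row
    return [block.data(i, 0) for i in range(rows)]
```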
#### Aggregate Template

@@ -377,7 +377,7 @@ def finish(buf: bytes) -> output_type:
    #return obj of type outputtype
```

-Note: aggregate UDF requires init(), destroy(), start(), reduce() and finish() to be impemented. start() generates the initial result in buffer, then the input data is divided into multiple row data blocks, reduce() is invoked for each data block `inputs` and intermediate `buf`, finally finish() is invoked to generate final result from the intermediate result `buf`.
+Note: an aggregate UDF requires init(), destroy(), start(), reduce(), and finish() to be implemented. start() generates the initial result buffer; the input data is then divided into multiple row data blocks, and reduce() is invoked for each data block `inputs` together with the intermediate `buf`; finally, finish() is invoked to generate the final result from the intermediate result `buf`.
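As an illustration of that lifecycle (a sketch of mine, not the myspread sample referenced below), an aggregate UDF computing a column maximum could serialize its intermediate buffer with pickle:

```python
# Aggregate UDF sketch: running maximum of the first input column.
# The intermediate buffer is opaque bytes, so the UDF pickles its state.
import pickle

def init():
    pass

def destroy():
    pass

def start():
    return pickle.dumps(None)                # initial intermediate result

def reduce(block, buf):
    acc = pickle.loads(buf)
    rows, _cols = block.shape()              # assumed (rows, cols) helper
    for i in range(rows):
        v = block.data(i, 0)
        if v is not None:
            acc = v if acc is None else max(acc, v)
    return pickle.dumps(acc)                 # updated intermediate result

def finish(buf):
    return pickle.loads(buf)                 # final result (may be None)
```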
### Data Mapping between TDengine SQL and Python UDF

@@ -559,7 +559,7 @@ Note: Prior to TDengine 3.0.5.0 (excluding), updating a UDF requires to restart
#### Sample 3: UDF with n arguments

-A UDF which accepts n intergers, likee (x1, x2, ..., xn) and output the sum of the product of each value and its sequence number: 1 * x1 + 2 * x2 + ... + n * xn. If there is `null` in the input, then the result is `null`. The difference from sample 1 is that it can accept any number of columns as input and process each column. Assume the program is written in /root/udf/nsum.py:
+A UDF which accepts n integers, like (x1, x2, ..., xn), and outputs the sum of the product of each value and its sequence number: 1 * x1 + 2 * x2 + ... + n * xn. If there is `null` in the input, then the result is `null`. The difference from sample 1 is that it can accept any number of columns as input and process each column. Assume the program is written in /root/udf/nsum.py:

```python
def init():
@@ -607,7 +607,7 @@ Query OK, 4 row(s) in set (0.010653s)
#### Sample 4: Utilize 3rd party package

-A UDF which accepts a timestamp and output the next closed Sunday. This sample requires to use third party package `moment`, you need to install it firslty.
+A UDF which accepts a timestamp and outputs the next closest Sunday. This sample requires the third-party package `moment`; you need to install it first.

```shell
pip3 install moment
@@ -701,7 +701,7 @@ Query OK, 4 row(s) in set (1.011474s)
#### Sample 5: Aggregate Function

-An aggregate function which calculates the difference of the maximum and the minimum in a column. An aggregate funnction takes multiple rows as input and output only one data. The execution process of an aggregate UDF is like map-reduce, the framework divides the input into multiple parts, each mapper processes one block and the reducer aggregates the result of the mappers. The reduce() of Python UDF has the functionality of both map() and reduce(). The reduce() takes two arguments: the data to be processed; and the result of other tasks executing reduce(). For exmaple, assume the code is in `/root/udf/myspread.py`.
+An aggregate function which calculates the difference between the maximum and the minimum in a column. An aggregate function takes multiple rows as input and outputs only one value. The execution process of an aggregate UDF is like map-reduce: the framework divides the input into multiple parts, each mapper processes one block, and the reducer aggregates the results of the mappers. The reduce() of a Python UDF has the functionality of both map() and reduce(); it takes two arguments: the data to be processed, and the result of other tasks executing reduce(). For example, assume the code is in `/root/udf/myspread.py`.
```python
import io
@@ -755,7 +755,7 @@ In this example, we implemented an aggregate function, and added some logging.
2. log() is the function for logging; it converts the input object to a string and outputs it with a line break
3. destroy() closes the log file \
4. start() returns the initial buffer for storing the intermediate result
-5. reduce() processes each daa block and aggregates the result
+5. reduce() processes each data block and aggregates the result
6. finish() converts the final buffer() to the final result\

Create the UDF.
......
@@ -672,7 +672,7 @@ If you input a specific column, the number of non-null values in the column is r
ELAPSED(ts_primary_key [, time_unit])
```

-**Description**: `elapsed` function can be used to calculate the continuous time length in which there is valid data. If it's used with `INTERVAL` clause, the returned result is the calculated time length within each time window. If it's used without `INTERVAL` caluse, the returned result is the calculated time length within the specified time range. Please be noted that the return value of `elapsed` is the number of `time_unit` in the calculated time length.
+**Description**: The `elapsed` function can be used to calculate the continuous time length in which there is valid data. If it's used with an `INTERVAL` clause, the returned result is the calculated time length within each time window. If it's used without an `INTERVAL` clause, the returned result is the calculated time length within the specified time range. Note that the return value of `elapsed` is the number of `time_unit` units in the calculated time length.

**Return value type**: Double if the input value is not NULL;
......
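As a usage illustration (a hypothetical sketch: the `power` database and `meters` table are assumptions), `elapsed` can be queried per time window from the Python connector:

```python
# Hypothetical sketch: continuous data coverage per 1-hour window, in
# seconds. Assumes a "power" database with a "meters" table whose
# primary timestamp column is "ts".
import taos

conn = taos.connect(database="power")
result = conn.query(
    "SELECT _wstart, ELAPSED(ts, 1s) FROM meters "
    "WHERE ts >= '2023-01-01 00:00:00' AND ts < '2023-01-02 00:00:00' "
    "INTERVAL(1h)"
)
for row in result:
    print(row)   # (window start, elapsed seconds with valid data)
conn.close()
```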
@@ -21,7 +21,7 @@ part_list can be any scalar expression, such as a column, constant, scalar funct
A PARTITION BY clause is processed as follows:

- The PARTITION BY clause must occur after the WHERE clause
-- The PARTITION BY caluse partitions the data according to the specified dimensions, then perform computation on each partition. The performed computation is determined by the rest of the statement - a window clause, GROUP BY clause, or SELECT clause.
+- The PARTITION BY clause partitions the data according to the specified dimensions, then performs computation on each partition. The performed computation is determined by the rest of the statement: a window clause, GROUP BY clause, or SELECT clause.
- The PARTITION BY clause can be used together with a window clause or GROUP BY clause. In this case, the window or GROUP BY clause takes effect on every partition. For example, the following statement partitions the table by the location tag, performs downsampling over a 10 minute window, and returns the maximum value:

```sql
......
@@ -81,7 +81,7 @@ Provides information about user-created databases. Similar to SHOW DATABASES.
| 3 | ntables | INT | Number of standard tables and subtables (not including supertables) |
| 4 | vgroups | INT | Number of vgroups. It should be noted that `vnodes` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 6 | replica | INT | Number of replicas. It should be noted that `replica` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
-| 7 | strict | BINARY(3) | Strong consistency. It should be noted that `strict` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
+| 7 | strict | BINARY(4) | Obsolete |
| 8 | duration | INT | Duration for storage of single files. It should be noted that `duration` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9 | keep | INT | Data retention period. It should be noted that `keep` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 10 | buffer | INT | Write cache size per vnode, in MB. It should be noted that `buffer` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
......
@@ -36,7 +36,8 @@ REST connection supports all platforms that can run Java.
| taos-jdbcdriver version | major changes | TDengine version |
| :---------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------: |
-| 3.2.1 | subscription add seek function | 3.0.5.0 or later |
+| 3.2.3 | Fixed resultSet data parsing failure in some cases | 3.0.5.0 or later |
+| 3.2.2 | subscription add seek function | 3.0.5.0 or later |
| 3.2.1 | JDBC REST connection supports schemaless/prepareStatement over WebSocket | 3.0.3.0 or later |
| 3.2.0 | This version has been deprecated | - |
| 3.1.0 | JDBC REST connection supports subscription over WebSocket | - |
@@ -284,10 +285,11 @@ The configuration parameters in the URL are as follows:
- batchfetch: true: pulls result sets in batches when executing queries; false: pulls result sets row by row. The default value is: false. batchfetch uses HTTP for data transfer. JDBC REST supports batch pulls. taos-jdbcdriver and TDengine transfer data via WebSocket connection. Compared with HTTP, WebSocket enables JDBC REST connection to support large data volume querying and improve query performance.
- charset: specify the charset to parse the string, this parameter is valid only when set batchfetch to true.
- batchErrorIgnore: true: when executing executeBatch of Statement, if one SQL execution fails in the middle, continue to execute the following SQL. false: no longer execute any statement after the failed SQL. The default value is: false.
-- httpConnectTimeout: REST connection timeout in milliseconds, the default value is 5000 ms.
+- httpConnectTimeout: REST connection timeout in milliseconds, the default value is 60000 ms.
-- httpSocketTimeout: socket timeout in milliseconds, the default value is 5000 ms. It only takes effect when batchfetch is false.
+- httpSocketTimeout: socket timeout in milliseconds, the default value is 60000 ms. It only takes effect when batchfetch is false.
-- messageWaitTimeout: message transmission timeout in milliseconds, the default value is 3000 ms. It only takes effect when batchfetch is true.
+- messageWaitTimeout: message transmission timeout in milliseconds, the default value is 60000 ms. It only takes effect when batchfetch is true.
- useSSL: connecting Securely Using SSL. true: using SSL connection, false: not using SSL connection.
+- httpPoolSize: size of REST concurrent requests. The default value is 20.

**Note**: Some configuration items (e.g., locale, timezone) do not work in the REST connection.
@@ -351,10 +353,11 @@ The configuration parameters in properties are as follows.
- TSDBDriver.PROPERTY_KEY_CHARSET: In the character set used by the client, the default value is the system character set.
- TSDBDriver.PROPERTY_KEY_LOCALE: this only takes effect when using JDBC native connection. Client language environment, the default value is system current locale.
- TSDBDriver.PROPERTY_KEY_TIME_ZONE: only takes effect when using JDBC native connection. In the time zone used by the client, the default value is the system's current time zone.
-- TSDBDriver.HTTP_CONNECT_TIMEOUT: REST connection timeout in milliseconds, the default value is 5000 ms. It only takes effect when using JDBC REST connection.
+- TSDBDriver.HTTP_CONNECT_TIMEOUT: REST connection timeout in milliseconds, the default value is 60000 ms. It only takes effect when using JDBC REST connection.
-- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket timeout in milliseconds, the default value is 5000 ms. It only takes effect when using JDBC REST connection and batchfetch is false.
+- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket timeout in milliseconds, the default value is 60000 ms. It only takes effect when using JDBC REST connection and batchfetch is false.
-- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: message transmission timeout in milliseconds, the default value is 3000 ms. It only takes effect when using JDBC REST connection and batchfetch is true.
+- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: message transmission timeout in milliseconds, the default value is 60000 ms. It only takes effect when using JDBC REST connection and batchfetch is true.
- TSDBDriver.PROPERTY_KEY_USE_SSL: connecting Securely Using SSL. true: using SSL connection, false: not using SSL connection. It only takes effect when using JDBC REST connection.
+- TSDBDriver.HTTP_POOL_SIZE: size of REST concurrent requests. The default value is 20.

For JDBC native connections, you can specify other parameters, such as log level, SQL length, etc., by specifying URL and Properties. For more detailed configuration, please refer to [Client Configuration](/reference/config/#Client-Only).
### Priority of configuration parameters

@@ -419,6 +422,19 @@ while(resultSet.next()){
> The query is consistent with operating a relational database. When using subscripts to get the contents of the returned fields, you have to start from 1. However, we recommend using the field names to get the values of the fields in the result set.
### Execute SQL with reqId

The reqId can be used for request link tracing.
```java
AbstractStatement aStmt = (AbstractStatement) connection.createStatement();
aStmt.execute("create database if not exists db", 1L);
aStmt.executeUpdate("use db", 2L);
try (ResultSet rs = aStmt.executeQuery("select * from tb", 3L)) {
    Timestamp ts = rs.getTimestamp(1);
}
```
### Writing data via parameter binding

TDengine has significantly improved the bind APIs to support data writing (INSERT) scenarios. Writing data in this way avoids the resource consumption of SQL syntax parsing, resulting in significant write performance improvements in many cases.

@@ -936,6 +952,14 @@ public class SchemalessWsTest {
</TabItem>
</Tabs>
### Schemaless with reqId
The reqId can be used for request link tracing.
```java
writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS, 1L);
```
### Data Subscription

The TDengine Java Connector supports subscription functionality with the following application API.

@@ -993,7 +1017,7 @@ while(true) {
#### Assignment subscription Offset

-```
+```java
long position(TopicPartition partition) throws SQLException;
Map<TopicPartition, Long> position(String topic) throws SQLException;
Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
@@ -1002,6 +1026,29 @@ Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;
void seek(TopicPartition partition, long offset) throws SQLException;
```
Example usage is as follows.
```java
String topic = "offset_seek_test";
Map<TopicPartition, Long> offset = null;
try (TaosConsumer<ResultBean> consumer = new TaosConsumer<>(properties)) {
    consumer.subscribe(Collections.singletonList(topic));
    for (int i = 0; i < 10; i++) {
        if (i == 3) {
            // Saving consumption position
            offset = consumer.position(topic);
        }
        if (i == 5) {
            // Reset consumption to the previously saved position
            for (Map.Entry<TopicPartition, Long> entry : offset.entrySet()) {
                consumer.seek(entry.getKey(), entry.getValue());
            }
        }
        ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(500));
    }
}
```
#### Close subscriptions

```java
@@ -1308,3 +1355,7 @@ For additional troubleshooting, see [FAQ](../../../train-faq/faq).
## API Reference

[taos-jdbcdriver doc](https://docs.taosdata.com/api/taos-jdbcdriver)
@@ -48,7 +48,6 @@ Comparing the connector support for TDengine functional features as follows.
| **Parameter Binding** | Support | Support | Support | Support | Support | Support |
| **Subscription (TMQ)** | Support | Support | Support | Support | Support | Support |
| **Schemaless** | Support | Support | Support | Support | Support | Support |
-| **DataFrame** | Not Supported | Support | Not Supported | Not Supported | Not Supported | Not Supported |

:::info
The different database framework specifications for various programming languages do not mean that all C/C++ interfaces need a wrapper.

@@ -60,11 +59,10 @@ The different database framework specifications for various programming language
| -------------------------------------- | ------------- | --------------- | ------------- | ------------- | ------------- | ------------- |
| **Connection Management** | Support | Support | Support | Support | Support | Support |
| **Regular Query** | Support | Support | Support | Support | Support | Support |
-| **Parameter Binding** | Not Supported | Not Supported | Support | Support | Not Supported | Support |
+| **Parameter Binding** | Supported | Not Supported | Support | Support | Not Supported | Support |
| **Subscription (TMQ)** | Supported | Support | Support | Not Supported | Not Supported | Support |
-| **Schemaless** | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported |
+| **Schemaless** | Supported | Not Supported | Supported | Not Supported | Not Supported | Not Supported |
| **Bulk Pulling (based on WebSocket)** | Support | Support | Support | Support | Support | Support |
-| **DataFrame** | Not Supported | Support | Not Supported | Not Supported | Not Supported | Not Supported |

:::warning
......
@@ -19,14 +19,18 @@ taosd -C
## Configuration File on Client Side

-TDengine CLI `taos` is the tool for users to interact with TDengine. It can share same configuration file as `taosd` or use a separate configuration file. When launching `taos`, parameter `-c` can be used to specify the location where its configuration file is. For example `taos -c /home/cfg` means `/home/cfg/taos.cfg` will be used. If `-c` is not used, the default location of the configuration file is `/etc/taos`. For more details please use `taos --help` to get.
-
-```bash
-taos -C
-```
-
-```bash
-taos --dump-config
-```
+TDengine CLI `taos` is the tool for users to interact with TDengine. It can share the same configuration file as `taosd` or use a separate configuration file. When launching `taos`, parameter `-c` can be used to specify the location of its configuration file. For example:
+
+```bash
+taos -c /home/cfg
+```
+
+means `/home/cfg/taos.cfg` will be used. If `-c` is not used, the default location of the configuration file is `/etc/taos`. For more details please use `taos --help`.
+
+Parameter `-C` can be used on the CLI of `taos` to show its configuration, like below:
+
+```bash
+taos -C
+```
## Configuration Parameters

@@ -77,8 +81,9 @@ The parameters described in this document by the effect that they have on the sy
| Default Value | 6030 |

:::note
-- Ensure that your firewall rules do not block TCP port 6042 on any host in the cluster. Below table describes the ports used by TDengine in details.
+Ensure that your firewall rules do not block TCP port 6042 on any host in the cluster. The table below describes the ports used by TDengine in detail.
:::

| Protocol | Default Port | Description | How to configure |
| :------- | :----------- | :-------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------------- |
| TCP | 6030 | Communication between client and server. In a multi-node cluster, communication between nodes. serverPort |
@@ -120,6 +125,8 @@ The parameters described in this document by the effect that they have on the sy
:::note
Please note that `taoskeeper` needs to be installed and running to create the `log` database and receive metrics sent by `taosd` as the full monitoring solution.
+:::

### monitor

| Attribute | Description |
......
---
sidebar_label: DBeaver
title: DBeaver
description: You can use DBeaver to access your data stored in TDengine and TDengine Cloud.
---
[DBeaver](https://dbeaver.io/) is a popular cross-platform database management tool that facilitates data management for developers, database administrators, data analysts, and other users. Starting from version 23.1.1, DBeaver natively supports TDengine and can be used to manage TDengine Cloud as well as TDengine clusters deployed on-premises.
## Prerequisites
To use DBeaver to manage TDengine, you need to prepare the following:
- Install DBeaver. DBeaver supports mainstream operating systems including Windows, macOS, and Linux. Please make sure you download and install the correct version (23.1.1+) and platform package. Please refer to the [official DBeaver documentation](https://github.com/dbeaver/dbeaver/wiki/Installation) for detailed installation steps.
- If you use an on-premises TDengine cluster, please make sure that TDengine and taosAdapter are deployed and running properly. For detailed information, please refer to the taosAdapter User Manual.
- If you use TDengine Cloud, please [register](https://cloud.tdengine.com/) for an account.
## Usage
### Use DBeaver to access on-premises TDengine cluster
1. Start the DBeaver application, click the button or menu item to choose **New Database Connection**, and then select **TDengine** in the **Timeseries** category.
![Connect TDengine with DBeaver](./dbeaver/dbeaver-connect-tdengine-en.webp)
2. Configure the TDengine connection by filling in the host address, port number, username, and password. If TDengine is deployed on the local machine, you are only required to fill in the username and password. The default username is root and the default password is taosdata. Click **Test Connection** to check whether the connection is workable. If you do not have the TDengine Java connector installed on the local machine, DBeaver will prompt you to download and install it.
![Configure the TDengine connection](./dbeaver/dbeaver-config-tdengine-en.webp)
3. If the connection is successful, it will be displayed as shown in the following figure. If the connection fails, please check whether the TDengine service and taosAdapter are running correctly and whether the host address, port number, username, and password are correct.
![Connection successful](./dbeaver/dbeaver-connect-tdengine-test-en.webp)
4. Use DBeaver to select databases and tables and browse your data stored in TDengine.
![Browse TDengine data with DBeaver](./dbeaver/dbeaver-browse-data-en.webp)
5. You can also manipulate TDengine data by executing SQL commands.
![Use SQL commands to manipulate TDengine data in DBeaver](./dbeaver/dbeaver-sql-execution-en.webp)
### Use DBeaver to access TDengine Cloud
1. Log in to the TDengine Cloud service, select **Programming** > **Java** in the management console, and then copy the string value of `TDENGINE_JDBC_URL` displayed in the **Config** section.
![Copy JDBC URL from TDengine Cloud](./dbeaver/tdengine-cloud-jdbc-dsn-en.webp)
2. Start the DBeaver application, click the button or menu item to choose **New Database Connection**, and then select **TDengine Cloud** in the **Timeseries** category.
![Connect TDengine Cloud with DBeaver](./dbeaver/dbeaver-connect-tdengine-cloud-en.webp)
3. Configure the TDengine Cloud connection by filling in the JDBC URL value. Click **Test Connection**. If you do not have the TDengine Java connector installed on the local machine, DBeaver will prompt you to download and install it. If the connection is successful, it will be displayed as shown in the following figure. If the connection fails, please check whether the TDengine Cloud service is running properly and whether the JDBC URL is correct.
![Configure the TDengine Cloud connection](./dbeaver/dbeaver-connect-tdengine-cloud-test-en.webp)
4. Use DBeaver to select databases and tables and browse your data stored in TDengine Cloud.
![Browse TDengine Cloud data with DBeaver](./dbeaver/dbeaver-browse-data-cloud-en.webp)
5. You can also manipulate TDengine Cloud data by executing SQL commands.
![Use SQL commands to manipulate TDengine Cloud data in DBeaver](./dbeaver/dbeaver-sql-execution-cloud-en.webp)
@@ -56,7 +56,7 @@ This error indicates that the client could not connect to the server. Perform t
7. If you are using the Python, Java, Go, Rust, C#, or Node.js connector on Linux to connect to the server, verify that `libtaos.so` is in the `/usr/local/taos/driver` directory and `/usr/local/taos/driver` is in the `LD_LIBRARY_PATH` environment variable.

-8. If you are using macOS, verify that `libtaos.dylib` is in the `/usr/local/lib` directory and `/usr/local/lib` is in the `LD_LIBRARY_PATH` environment variable..
+8. If you are using macOS, verify that `libtaos.dylib` is in the `/usr/local/lib` directory and `/usr/local/lib` is in the `DYLD_LIBRARY_PATH` environment variable.

9. If you are using Windows, verify that `C:\TDengine\driver\taos.dll` is in the `PATH` environment variable. If possible, move `taos.dll` to the `C:\Windows\System32` directory.
......
@@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://w
import Release from "/components/ReleaseV3";

## 3.0.5.1

<Release type="tdengine" version="3.0.5.1" />

## 3.0.5.0

<Release type="tdengine" version="3.0.5.0" />
......
@@ -10,6 +10,10 @@ For other historical version installers, please visit [here](https://www.taosdat
import Release from "/components/ReleaseV3";

## 2.5.2

<Release type="tools" version="2.5.2" />

## 2.5.1

<Release type="tools" version="2.5.1" />
......
#!
import taosws
import taos

db_name = 'test_ws_stmt'


def before():
    taos_conn = taos.connect()
    taos_conn.execute("drop database if exists %s" % db_name)
    taos_conn.execute("create database %s" % db_name)
    taos_conn.select_db(db_name)
    taos_conn.execute("create table t1 (ts timestamp, a int, b float, c varchar(10))")
    taos_conn.execute(
        "create table stb1 (ts timestamp, a int, b float, c varchar(10)) tags (t1 int, t2 binary(10))")
    taos_conn.close()


def stmt_insert():
    before()

    conn = taosws.connect('taosws://root:taosdata@localhost:6041/%s' % db_name)

    while True:
        try:
            stmt = conn.statement()
            stmt.prepare("insert into t1 values (?, ?, ?, ?)")
            stmt.bind_param([
                taosws.millis_timestamps_to_column([1686844800000, 1686844801000, 1686844802000, 1686844803000]),
                taosws.ints_to_column([1, 2, 3, 4]),
                taosws.floats_to_column([1.1, 2.2, 3.3, 4.4]),
                taosws.varchar_to_column(['a', 'b', 'c', 'd']),
            ])
            stmt.add_batch()
            rows = stmt.execute()
            print(rows)
            stmt.close()
        except Exception as e:
            if 'Retry needed' in e.args[0]:  # deal with [0x0125] Retry needed
                continue
            else:
                raise e
        break


def stmt_insert_into_stable():
    before()

    conn = taosws.connect("taosws://root:taosdata@localhost:6041/%s" % db_name)

    while True:
        try:
            stmt = conn.statement()
            stmt.prepare("insert into ? using stb1 tags (?, ?) values (?, ?, ?, ?)")
            stmt.set_tbname('stb1_1')
            stmt.set_tags([
                taosws.int_to_tag(1),
                taosws.varchar_to_tag('aaa'),
            ])
            stmt.bind_param([
                taosws.millis_timestamps_to_column([1686844800000, 1686844801000, 1686844802000, 1686844803000]),
                taosws.ints_to_column([1, 2, 3, 4]),
                taosws.floats_to_column([1.1, 2.2, 3.3, 4.4]),
                taosws.varchar_to_column(['a', 'b', 'c', 'd']),
            ])
            stmt.add_batch()
            rows = stmt.execute()
            print(rows)
            stmt.close()
        except Exception as e:
            if 'Retry needed' in e.args[0]:  # deal with [0x0125] Retry needed
                continue
            else:
                raise e
        break
#!
import time

import taosws
import taos


def before_test(db_name):
    taos_conn = taos.connect()
    taos_conn.execute("drop database if exists %s" % db_name)
    taos_conn.execute("create database %s" % db_name)
    taos_conn.select_db(db_name)
    taos_conn.execute("create table t1 (ts timestamp, a int, b float, c varchar(10))")
    taos_conn.execute(
        "create table stb1 (ts timestamp, a int, b float, c varchar(10)) tags (t1 int, t2 binary(10))")
    taos_conn.close()


def after_test(db_name):
    taos_conn = taos.connect()
    taos_conn.execute("drop database if exists %s" % db_name)
    taos_conn.close()


def stmt_insert():
    db_name = 'test_ws_stmt_{}'.format(int(time.time()))
    before_test(db_name)

    conn = taosws.connect('taosws://root:taosdata@localhost:6041/%s' % db_name)

    stmt = conn.statement()
    stmt.prepare("insert into t1 values (?, ?, ?, ?)")
    stmt.bind_param([
        taosws.millis_timestamps_to_column([1686844800000, 1686844801000, 1686844802000, 1686844803000]),
        taosws.ints_to_column([1, 2, 3, 4]),
        taosws.floats_to_column([1.1, 2.2, 3.3, 4.4]),
        taosws.varchar_to_column(['a', 'b', 'c', 'd']),
    ])
    stmt.add_batch()
    rows = stmt.execute()
    assert rows == 4
    stmt.close()

    after_test(db_name)


def stmt_insert_into_stable():
    db_name = 'test_ws_stmt_{}'.format(int(time.time()))
    before_test(db_name)

    conn = taosws.connect("taosws://root:taosdata@localhost:6041/%s" % db_name)

    stmt = conn.statement()
    stmt.prepare("insert into ? using stb1 tags (?, ?) values (?, ?, ?, ?)")
    stmt.set_tbname('stb1_1')
    stmt.set_tags([
        taosws.int_to_tag(1),
        taosws.varchar_to_tag('aaa'),
    ])
    stmt.bind_param([
        taosws.millis_timestamps_to_column([1686844800000, 1686844801000, 1686844802000, 1686844803000]),
        taosws.ints_to_column([1, 2, 3, 4]),
        taosws.floats_to_column([1.1, 2.2, 3.3, 4.4]),
        taosws.varchar_to_column(['a', 'b', 'c', 'd']),
    ])
    stmt.add_batch()
    rows = stmt.execute()
    assert rows == 4
    stmt.close()

    after_test(db_name)


if __name__ == '__main__':
    stmt_insert()
    stmt_insert_into_stable()
(2 file diffs collapsed.)
@@ -105,6 +105,12 @@ class Consumer:
    def poll(self, timeout: float = 1.0):
        pass

+    def assignment(self):
+        pass
+
+    def seek(self, partition):
+        pass
+
    def close(self):
        pass
......
(48 file diffs collapsed.)