Unverified commit 1e51a9cf authored by dapan1121 and committed by GitHub

Merge pull request #21852 from taosdata/feat/mergeMainTo3.0

merge main to 3.0
......@@ -15,11 +15,15 @@ SET(TD_COMMUNITY_DIR ${PROJECT_SOURCE_DIR})
set(TD_SUPPORT_DIR "${TD_SOURCE_DIR}/cmake")
set(TD_CONTRIB_DIR "${TD_SOURCE_DIR}/contrib")
include(${TD_SUPPORT_DIR}/cmake.platform)
include(${TD_SUPPORT_DIR}/cmake.define)
include(${TD_SUPPORT_DIR}/cmake.options)
include(${TD_SUPPORT_DIR}/cmake.version)
# contrib
add_subdirectory(contrib)
......
......@@ -68,14 +68,14 @@ sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-d
```bash
sudo yum install epel-release
sudo yum update
sudo yum install -y gcc gcc-c++ make cmake3 git openssl-devel
sudo yum install -y gcc gcc-c++ make cmake3 gflags git openssl-devel
sudo ln -sf /usr/bin/cmake3 /usr/bin/cmake
```
### CentOS 8/Fedora/Rocky Linux
```bash
sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel
sudo dnf install -y gcc gcc-c++ gflags make cmake epel-release git openssl-devel
```
#### Install build dependencies for taosTools on CentOS
......@@ -117,7 +117,7 @@ scl enable devtoolset-9 -- bash
### macOS
```
brew install argp-standalone pkgconfig
brew install argp-standalone gflags pkgconfig
```
### Setup golang environment
......
......@@ -76,14 +76,14 @@ sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-d
```bash
sudo yum install epel-release
sudo yum update
sudo yum install -y gcc gcc-c++ make cmake3 git openssl-devel
sudo yum install -y gcc gcc-c++ make cmake3 gflags git openssl-devel
sudo ln -sf /usr/bin/cmake3 /usr/bin/cmake
```
### CentOS 8/Fedora/Rocky Linux
```bash
sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel
sudo dnf install -y gcc gcc-c++ make cmake epel-release gflags git openssl-devel
```
#### Install build dependencies for taosTools on CentOS
......@@ -124,7 +124,7 @@ scl enable devtoolset-9 -- bash
### macOS
```
brew install argp-standalone pkgconfig
brew install argp-standalone gflags pkgconfig
```
### Setup golang environment
......
......@@ -189,3 +189,9 @@ option(
"If build release version"
OFF
)
option(
BUILD_CONTRIB
"If build thirdpart from source"
OFF
)
......@@ -121,6 +121,12 @@ IF ("${CPUTYPE}" STREQUAL "")
SET(TD_LOONGARCH_64 TRUE)
ADD_DEFINITIONS("-D_TD_LOONGARCH_")
ADD_DEFINITIONS("-D_TD_LOONGARCH_64")
ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "mips64")
SET(PLATFORM_ARCH_STR "mips")
MESSAGE(STATUS "input cpuType: mips64")
SET(TD_MIPS_64 TRUE)
ADD_DEFINITIONS("-D_TD_MIPS_")
ADD_DEFINITIONS("-D_TD_MIPS_64")
ENDIF ()
ELSE ()
# if generate ARM version:
......@@ -176,6 +182,8 @@ set(TD_DEPS_DIR "x86")
if (TD_LINUX)
IF (TD_ARM_64 OR TD_ARM_32)
set(TD_DEPS_DIR "arm")
ELSEIF (TD_MIPS_64)
set(TD_DEPS_DIR "mips")
ELSE()
set(TD_DEPS_DIR "x86")
ENDIF()
......
# rocksdb
IF (NOT ${TD_LINUX})
ExternalProject_Add(rocksdb
GIT_REPOSITORY https://github.com/facebook/rocksdb.git
GIT_TAG v8.1.1
SOURCE_DIR "${TD_CONTRIB_DIR}/rocksdb"
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
TEST_COMMAND ""
if (${BUILD_CONTRIB})
ExternalProject_Add(rocksdb
URL https://github.com/facebook/rocksdb/archive/refs/tags/v8.1.1.tar.gz
URL_HASH MD5=3b4c97ee45df9c8a5517308d31ab008b
DOWNLOAD_NO_PROGRESS 1
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
SOURCE_DIR "${TD_CONTRIB_DIR}/rocksdb"
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
TEST_COMMAND ""
)
ENDIF(NOT ${TD_LINUX})
else()
if (NOT ${TD_LINUX})
ExternalProject_Add(rocksdb
URL https://github.com/facebook/rocksdb/archive/refs/tags/v8.1.1.tar.gz
URL_HASH MD5=3b4c97ee45df9c8a5517308d31ab008b
DOWNLOAD_NO_PROGRESS 1
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
SOURCE_DIR "${TD_CONTRIB_DIR}/rocksdb"
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
TEST_COMMAND ""
)
endif()
endif()
......@@ -2,7 +2,7 @@
# taosadapter
ExternalProject_Add(taosadapter
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
GIT_TAG 3.0
GIT_TAG main
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
......
......@@ -77,19 +77,23 @@ if(${BUILD_WITH_LEVELDB})
cat("${TD_SUPPORT_DIR}/leveldb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif(${BUILD_WITH_LEVELDB})
# rocksdb
IF (NOT ${TD_LINUX})
if(${BUILD_WITH_ROCKSDB})
cat("${TD_SUPPORT_DIR}/rocksdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
add_definitions(-DUSE_ROCKSDB)
endif(${BUILD_WITH_ROCKSDB})
ELSE()
if(${BUILD_WITH_ROCKSDB})
#cat("${TD_SUPPORT_DIR}/rocksdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
add_definitions(-DUSE_ROCKSDB)
endif(${BUILD_WITH_ROCKSDB})
ENDIF(NOT ${TD_LINUX})
if (${BUILD_CONTRIB})
if(${BUILD_WITH_ROCKSDB})
cat("${TD_SUPPORT_DIR}/rocksdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
add_definitions(-DUSE_ROCKSDB)
endif()
else()
if (NOT ${TD_LINUX})
if(${BUILD_WITH_ROCKSDB})
cat("${TD_SUPPORT_DIR}/rocksdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
add_definitions(-DUSE_ROCKSDB)
endif(${BUILD_WITH_ROCKSDB})
else()
if(${BUILD_WITH_ROCKSDB})
add_definitions(-DUSE_ROCKSDB)
endif(${BUILD_WITH_ROCKSDB})
endif()
endif()
# canonical-raft
if(${BUILD_WITH_CRAFT})
......@@ -235,70 +239,114 @@ endif(${BUILD_WITH_LEVELDB})
# rocksdb
# To support rocksdb build on ubuntu: sudo apt-get install libgflags-dev
IF (NOT ${TD_LINUX})
if(${BUILD_WITH_ROCKSDB})
if (${BUILD_WITH_UV})
if(${TD_LINUX})
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL} -Wno-error=maybe-uninitialized -Wno-error=unused-but-set-variable -Wno-error=unused-variable -Wno-error=unused-function -Wno-errno=unused-private-field -Wno-error=unused-result")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS_REL}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL}")
IF ("${CMAKE_BUILD_TYPE}" STREQUAL "")
SET(CMAKE_BUILD_TYPE Release)
endif()
endif(${TD_LINUX})
MESSAGE(STATUS "CXXXX STATUS CONFIG: " ${CMAKE_CXX_FLAGS})
if(${TD_DARWIN})
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized")
endif(${TD_DARWIN})
if (${TD_DARWIN_ARM64})
set(HAS_ARMV8_CRC true)
endif(${TD_DARWIN_ARM64})
if (${TD_WINDOWS})
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4244 /wd4819")
endif(${TD_WINDOWS})
if(${TD_DARWIN})
option(HAVE_THREAD_LOCAL "" OFF)
option(WITH_IOSTATS_CONTEXT "" OFF)
option(WITH_PERF_CONTEXT "" OFF)
endif(${TD_DARWIN})
if(${TD_WINDOWS})
option(WITH_JNI "" OFF)
endif(${TD_WINDOWS})
if(${TD_WINDOWS})
option(WITH_MD_LIBRARY "build with MD" OFF)
set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib)
endif(${TD_WINDOWS})
option(WITH_FALLOCATE "" OFF)
option(WITH_JEMALLOC "" OFF)
option(WITH_GFLAGS "" OFF)
option(PORTABLE "" ON)
option(WITH_LIBURING "" OFF)
option(FAIL_ON_WARNINGS OFF)
option(WITH_TESTS "" OFF)
option(WITH_BENCHMARK_TOOLS "" OFF)
option(WITH_TOOLS "" OFF)
option(WITH_LIBURING "" OFF)
IF (TD_LINUX)
option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF)
ELSE()
option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF)
ENDIF()
add_subdirectory(rocksdb EXCLUDE_FROM_ALL)
target_include_directories(
rocksdb
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/rocksdb/include>
)
endif(${BUILD_WITH_ROCKSDB})
endif (${BUILD_WITH_UV})
if (${BUILD_WITH_ROCKSDB})
if (${BUILD_CONTRIB})
if(${TD_LINUX})
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL} -Wno-error=maybe-uninitialized -Wno-error=unused-but-set-variable -Wno-error=unused-variable -Wno-error=unused-function -Wno-errno=unused-private-field -Wno-error=unused-result")
if ("${CMAKE_BUILD_TYPE}" STREQUAL "")
SET(CMAKE_BUILD_TYPE Release)
endif()
endif(${TD_LINUX})
MESSAGE(STATUS "CXXXX STATUS CONFIG: " ${CMAKE_CXX_FLAGS})
if(${TD_DARWIN})
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized")
endif(${TD_DARWIN})
if (${TD_DARWIN_ARM64})
set(HAS_ARMV8_CRC true)
endif(${TD_DARWIN_ARM64})
if (${TD_WINDOWS})
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4244 /wd4819")
option(WITH_JNI "" OFF)
option(WITH_MD_LIBRARY "build with MD" OFF)
set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib)
endif(${TD_WINDOWS})
if(${TD_DARWIN})
option(HAVE_THREAD_LOCAL "" OFF)
option(WITH_IOSTATS_CONTEXT "" OFF)
option(WITH_PERF_CONTEXT "" OFF)
endif(${TD_DARWIN})
option(WITH_FALLOCATE "" OFF)
option(WITH_JEMALLOC "" OFF)
option(WITH_GFLAGS "" OFF)
option(PORTABLE "" ON)
option(WITH_LIBURING "" OFF)
option(FAIL_ON_WARNINGS OFF)
option(WITH_TESTS "" OFF)
option(WITH_BENCHMARK_TOOLS "" OFF)
option(WITH_TOOLS "" OFF)
option(WITH_LIBURING "" OFF)
option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF)
add_subdirectory(rocksdb EXCLUDE_FROM_ALL)
target_include_directories(
rocksdb
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/rocksdb/include>
)
else()
if (NOT ${TD_LINUX})
MESSAGE(STATUS "CXXXX STATUS CONFIG: " ${CMAKE_CXX_FLAGS})
if(${TD_DARWIN})
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized")
endif(${TD_DARWIN})
if (${TD_DARWIN_ARM64})
set(HAS_ARMV8_CRC true)
endif(${TD_DARWIN_ARM64})
if (${TD_WINDOWS})
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4244 /wd4819")
option(WITH_JNI "" OFF)
option(WITH_MD_LIBRARY "build with MD" OFF)
set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib)
endif(${TD_WINDOWS})
if(${TD_DARWIN})
option(HAVE_THREAD_LOCAL "" OFF)
option(WITH_IOSTATS_CONTEXT "" OFF)
option(WITH_PERF_CONTEXT "" OFF)
endif(${TD_DARWIN})
option(WITH_FALLOCATE "" OFF)
option(WITH_JEMALLOC "" OFF)
option(WITH_GFLAGS "" OFF)
option(PORTABLE "" ON)
option(WITH_LIBURING "" OFF)
option(FAIL_ON_WARNINGS OFF)
option(WITH_TESTS "" OFF)
option(WITH_BENCHMARK_TOOLS "" OFF)
option(WITH_TOOLS "" OFF)
option(WITH_LIBURING "" OFF)
option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF)
add_subdirectory(rocksdb EXCLUDE_FROM_ALL)
target_include_directories(
rocksdb
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/rocksdb/include>
)
endif()
endif()
endif()
ENDIF(NOT ${TD_LINUX})
# lucene
# To support build on ubuntu: sudo apt-get install libboost-all-dev
if(${BUILD_WITH_LUCENE})
......
This diff is collapsed.
......@@ -20,6 +20,19 @@ The standard server installation package includes `taos`, `taosd`, `taosAdapter`
The TDengine Community Edition is released as Deb and RPM packages. The Deb package can be installed on Debian, Ubuntu, and derivative systems. The RPM package can be installed on CentOS, RHEL, SUSE, and derivative systems. A .tar.gz package is also provided for enterprise customers, and you can install TDengine over `apt-get` as well. The .tar.gz package includes `taosdump` and the TDinsight installation script. If you want to use these utilities with the Deb or RPM package, download and install taosTools separately. TDengine can also be installed on x64 Windows and x64/m1 macOS.
## Operating environment requirements
On a Linux system, the minimum requirements for the operating environment are as follows:
- Linux kernel version: 3.10.0-1160.83.1.el7.x86_64
- glibc version: 2.17
If you compile and install from cloned source code, the following are also required:
- cmake version: 3.26.4 or above
- gcc version: 9.3.1 or above
## Installation
<Tabs>
......
......@@ -10,10 +10,10 @@ TDengine uses various kinds of caching techniques to efficiently write and query
TDengine uses an insert-driven cache management policy, known as first in, first out (FIFO). This policy differs from read-driven "least recently used (LRU)" cache management. A FIFO policy stores the latest data in cache and flushes the oldest data from cache to disk when the cache usage reaches a threshold. In IoT use cases, the most recent data or the current state is most important. The cache policy in TDengine, like much of the design and architecture of TDengine, is based on the nature of IoT data.
When you create a database, you can configure the size of the write cache on each vnode. The **vgroups** parameter determines the number of vgroups that process data in the database, and the **buffer** parameter determines the size of the write cache for each vnode.
When you create a database, you can configure the size of the write cache on each vnode. The **vgroups** parameter determines the number of vgroups that process data in the database, and the **buffer** parameter determines the size of the write cache for each vnode. The unit of buffer is MB.
```sql
create database db0 vgroups 100 buffer 16MB
create database db0 vgroups 100 buffer 16
```
In theory, larger cache sizes are always better. However, at a certain point, it becomes impossible to improve performance by increasing cache size. In most scenarios, you can retain the default cache settings.
......@@ -28,10 +28,10 @@ When you create a database, you can configure whether the latest data from every
## Metadata Cache
To improve query and write performance, each vnode caches the metadata that it receives. When you create a database, you can configure the size of the metadata cache through the *pages* and *pagesize* parameters.
To improve query and write performance, each vnode caches the metadata that it receives. When you create a database, you can configure the size of the metadata cache through the *pages* and *pagesize* parameters. The unit of pagesize is KB.
```sql
create database db0 pages 128 pagesize 16kb
create database db0 pages 128 pagesize 16
```
The preceding SQL statement creates 128 pages on each vnode in the `db0` database. Each page is 16 KB, giving each vnode 2 MB of metadata cache in total.
......
......@@ -81,7 +81,7 @@ Provides information about user-created databases. Similar to SHOW DATABASES.
| 3 | ntables | INT | Number of standard tables and subtables (not including supertables) |
| 4 | vgroups | INT | Number of vgroups. It should be noted that `vgroups` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 6 | replica | INT | Number of replicas. It should be noted that `replica` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 7 | strict | BINARY(3) | Strong consistency. It should be noted that `strict` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 7 | strict | BINARY(4) | Deprecated |
| 8 | duration | INT | Duration for storage of single files. It should be noted that `duration` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9 | keep | INT | Data retention period. It should be noted that `keep` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 10 | buffer | INT | Write cache size per vnode, in MB. It should be noted that `buffer` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
......
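Several column names in this table (`vgroups`, `replica`, `strict`, `duration`, `keep`, `buffer`) are TDengine keywords and must be escaped with backticks when selected, as the notes above state. A minimal sketch of such a query over JDBC, assuming an open `java.sql.Connection conn` and the TDengine 3.0 system table name `information_schema.ins_databases`:

```java
// Sketch: backtick-escape TDengine keywords used as column names.
// Assumes java.sql.Statement/ResultSet imports and an open Connection conn.
try (Statement stmt = conn.createStatement();
     ResultSet rs = stmt.executeQuery(
         "SELECT name, `vgroups`, `replica` FROM information_schema.ins_databases")) {
    while (rs.next()) {
        System.out.println(rs.getString("name") + ": vgroups=" + rs.getInt("vgroups"));
    }
}
```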
......@@ -288,6 +288,7 @@ The configuration parameters in the URL are as follows:
- httpSocketTimeout: socket timeout in milliseconds, the default value is 5000 ms. It only takes effect when batchfetch is false.
- messageWaitTimeout: message transmission timeout in milliseconds, the default value is 3000 ms. It only takes effect when batchfetch is true.
- useSSL: whether to connect securely using SSL. true: use an SSL connection, false: do not use an SSL connection.
- httpPoolSize: maximum number of concurrent REST requests. The default value is 20.
**Note**: Some configuration items (e.g., locale, timezone) do not work in the REST connection.
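For illustration, a REST connection URL carrying some of the parameters above might be assembled as follows. This is a sketch: the host, port, database, and credentials are placeholders, and `jdbc:TAOS-RS` is the REST URL prefix used by the connector.

```java
// Assumes java.sql.Connection and java.sql.DriverManager are imported.
// Placeholder endpoint and credentials; parameter names follow the list above.
String url = "jdbc:TAOS-RS://localhost:6041/test?user=root&password=taosdata"
        + "&batchfetch=true&messageWaitTimeout=3000&httpPoolSize=20";
Connection conn = DriverManager.getConnection(url);
```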
......@@ -355,6 +356,7 @@ The configuration parameters in properties are as follows.
- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket timeout in milliseconds, the default value is 5000 ms. It only takes effect when using JDBC REST connection and batchfetch is false.
- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: message transmission timeout in milliseconds, the default value is 3000 ms. It only takes effect when using JDBC REST connection and batchfetch is true.
- TSDBDriver.PROPERTY_KEY_USE_SSL: whether to connect securely using SSL. true: use an SSL connection, false: do not use an SSL connection. It only takes effect when using a JDBC REST connection.
- TSDBDriver.HTTP_POOL_SIZE: maximum number of concurrent REST requests. The default value is 20.
For JDBC native connections, you can specify other parameters, such as log level, SQL length, etc., by specifying URL and Properties. For more detailed configuration, please refer to [Client Configuration](/reference/config/#Client-Only).
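A hedged sketch of the Properties-based equivalent for a REST connection; the constant names come from the list above, and the URL and values are placeholders.

```java
// Assumes com.taosdata.jdbc.TSDBDriver plus java.sql and java.util imports.
Properties props = new Properties();
props.setProperty(TSDBDriver.PROPERTY_KEY_USE_SSL, "false");
props.setProperty(TSDBDriver.HTTP_POOL_SIZE, "20");
Connection conn = DriverManager.getConnection(
        "jdbc:TAOS-RS://localhost:6041/test?user=root&password=taosdata", props);
```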
### Priority of configuration parameters
......@@ -419,6 +421,19 @@ while(resultSet.next()){
> Queries work the same way as with a relational database. When using a subscript to get the content of a returned field, you must start from 1. However, we recommend using the field names to get the values of the fields in the result set.
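For example, both access styles on a `ResultSet` (a sketch; the column name `name` is a placeholder for illustration):

```java
// Assumes java.sql.Timestamp; resultSet is the ResultSet from the query above.
while (resultSet.next()) {
    Timestamp ts = resultSet.getTimestamp(1);   // by index: 1-based, not 0-based
    String name = resultSet.getString("name");  // by field name (recommended)
}
```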
### Execute SQL with reqId
The reqId can be used for request tracing.
```java
AbstractStatement aStmt = (AbstractStatement) connection.createStatement();
aStmt.execute("create database if not exists db", 1L);
aStmt.executeUpdate("use db", 2L);
try (ResultSet rs = aStmt.executeQuery("select * from tb", 3L)) {
Timestamp ts = rs.getTimestamp(1);
}
```
### Writing data via parameter binding
TDengine has significantly improved the bind APIs to support data writing (INSERT) scenarios. Writing data in this way avoids the resource consumption of SQL syntax parsing, resulting in significant write performance improvements in many cases.
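As a minimal illustration before the extended interfaces, parameter binding also works through the standard JDBC `PreparedStatement`. A sketch, assuming an open connection `conn` and an existing table `db.t1 (ts TIMESTAMP, val INT)` (both placeholders):

```java
// Standard JDBC parameter binding; the connector's extended bind API is
// designed for higher write throughput than this generic form.
String sql = "INSERT INTO db.t1 (ts, val) VALUES (?, ?)";
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
    pstmt.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
    pstmt.setInt(2, 42);
    pstmt.addBatch();      // add more rows here to batch the insert
    pstmt.executeBatch();
}
```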
......@@ -936,6 +951,14 @@ public class SchemalessWsTest {
</TabItem>
</Tabs>
### Schemaless with reqId
The reqId can be used for request tracing.
```java
writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS, 1L);
```
### Data Subscription
The TDengine Java Connector supports subscription functionality with the following application API.
......@@ -993,7 +1016,7 @@ while(true) {
#### Specify subscription offset
```
```java
long position(TopicPartition partition) throws SQLException;
Map<TopicPartition, Long> position(String topic) throws SQLException;
Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
......@@ -1002,6 +1025,29 @@ Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;
void seek(TopicPartition partition, long offset) throws SQLException;
```
Example usage is as follows.
```java
String topic = "offset_seek_test";
Map<TopicPartition, Long> offset = null;
try (TaosConsumer<ResultBean> consumer = new TaosConsumer<>(properties)) {
consumer.subscribe(Collections.singletonList(topic));
for (int i = 0; i < 10; i++) {
if (i == 3) {
// Saving consumption position
offset = consumer.position(topic);
}
if (i == 5) {
// reset consumption to the previously saved position
for (Map.Entry<TopicPartition, Long> entry : offset.entrySet()) {
consumer.seek(entry.getKey(), entry.getValue());
}
}
ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(500));
}
}
```
#### Close subscriptions
```java
......@@ -1308,3 +1354,7 @@ For additional troubleshooting, see [FAQ](../../../train-faq/faq).
## API Reference
[taos-jdbcdriver doc](https://docs.taosdata.com/api/taos-jdbcdriver)
......@@ -48,7 +48,6 @@ Comparing the connector support for TDengine functional features as follows.
| **Parameter Binding** | Support | Support | Support | Support | Support | Support |
| **Subscription (TMQ)** | Support | Support | Support | Support | Support | Support |
| **Schemaless** | Support | Support | Support | Support | Support | Support |
| **DataFrame** | Not Supported | Support | Not Supported | Not Supported | Not Supported | Not Supported |
:::info
Because database framework specifications differ across programming languages, it does not follow that every C/C++ interface needs a corresponding wrapper.
......@@ -60,11 +59,10 @@ The different database framework specifications for various programming language
| -------------------------------------- | ------------- | --------------- | ------------- | ------------- | ------------- | ------------- |
| **Connection Management** | Support | Support | Support | Support | Support | Support |
| **Regular Query** | Support | Support | Support | Support | Support | Support |
| **Parameter Binding** | Not Supported | Not Supported | Support | Support | Not Supported | Support |
| **Parameter Binding** | Support | Not Supported | Support | Support | Not Supported | Support |
| **Subscription (TMQ)** | Support | Support | Support | Not Supported | Not Supported | Support |
| **Schemaless** | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported |
| **Schemaless** | Support | Not Supported | Support | Not Supported | Not Supported | Not Supported |
| **Bulk Pulling (based on WebSocket)** | Support | Support | Support | Support | Support | Support |
| **DataFrame** | Not Supported | Support | Not Supported | Not Supported | Not Supported | Not Supported |
:::warning
......
......@@ -19,14 +19,18 @@ taosd -C
## Configuration File on Client Side
TDengine CLI `taos` is the tool for users to interact with TDengine. It can share same configuration file as `taosd` or use a separate configuration file. When launching `taos`, parameter `-c` can be used to specify the location where its configuration file is. For example `taos -c /home/cfg` means `/home/cfg/taos.cfg` will be used. If `-c` is not used, the default location of the configuration file is `/etc/taos`. For more details please use `taos --help` to get.
TDengine CLI `taos` is the tool for users to interact with TDengine. It can share same configuration file as `taosd` or use a separate configuration file. When launching `taos`, parameter `-c` can be used to specify the location where its configuration file is. For example:
```bash
taos -C
```
```bash
taos -c /home/cfg
```
means `/home/cfg/taos.cfg` will be used. If `-c` is not used, the default location of the configuration file is `/etc/taos`. For more details please use `taos --help` to get.
Parameter `-C` can be used on the CLI of `taos` to show its configuration, like below:
```bash
taos --dump-config
taos -C
```
## Configuration Parameters
......@@ -77,8 +81,9 @@ The parameters described in this document by the effect that they have on the sy
| Default Value | 6030 |
:::note
- Ensure that your firewall rules do not block TCP port 6042 on any host in the cluster. Below table describes the ports used by TDengine in details.
Ensure that your firewall rules do not block TCP port 6042 on any host in the cluster. Below table describes the ports used by TDengine in details.
:::
| Protocol | Default Port | Description | How to configure |
| :------- | :----------- | :-------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------------- |
| TCP | 6030 | Communication between client and server. In a multi-node cluster, communication between nodes. serverPort |
......@@ -120,6 +125,8 @@ The parameters described in this document by the effect that they have on the sy
:::note
Please note that `taoskeeper` needs to be installed and running to create the `log` database and receive the metrics sent by `taosd`, which together form the full monitoring solution.
:::
### monitor
| Attribute | Description |
......
---
sidebar_label: DBeaver
title: DBeaver
description: You can use DBeaver to access your data stored in TDengine and TDengine Cloud.
---
[DBeaver](https://dbeaver.io/) is a popular cross-platform database management tool that facilitates data management for developers, database administrators, data analysts, and other users. Starting from version 23.1.1, DBeaver natively supports TDengine and can be used to manage TDengine Cloud as well as TDengine clusters deployed on-premises.
## Prerequisites
To use DBeaver to manage TDengine, you need to prepare the following:
- Install DBeaver. DBeaver supports mainstream operating systems including Windows, macOS, and Linux. Please make sure you download and install the correct version (23.1.1+) and platform package. Please refer to the [official DBeaver documentation](https://github.com/dbeaver/dbeaver/wiki/Installation) for detailed installation steps.
- If you use an on-premises TDengine cluster, please make sure that TDengine and taosAdapter are deployed and running properly. For detailed information, please refer to the taosAdapter User Manual.
- If you use TDengine Cloud, please [register](https://cloud.tdengine.com/) for an account.
## Usage
### Use DBeaver to access on-premises TDengine cluster
1. Start the DBeaver application, click the button or menu item to choose **New Database Connection**, and then select **TDengine** in the **Timeseries** category.
![Connect TDengine with DBeaver](./dbeaver/dbeaver-connect-tdengine-en.webp)
2. Configure the TDengine connection by filling in the host address, port number, username, and password. If TDengine is deployed on the local machine, you are only required to fill in the username and password. The default username is root and the default password is taosdata. Click **Test Connection** to check whether the connection is workable. If you do not have the TDengine Java connector installed on the local machine, DBeaver will prompt you to download and install it.
![Configure the TDengine connection](./dbeaver/dbeaver-config-tdengine-en.webp)
3. If the connection is successful, it will be displayed as shown in the following figure. If the connection fails, please check whether the TDengine service and taosAdapter are running correctly and whether the host address, port number, username, and password are correct.
![Connection successful](./dbeaver/dbeaver-connect-tdengine-test-en.webp)
4. Use DBeaver to select databases and tables and browse your data stored in TDengine.
![Browse TDengine data with DBeaver](./dbeaver/dbeaver-browse-data-en.webp)
5. You can also manipulate TDengine data by executing SQL commands.
![Use SQL commands to manipulate TDengine data in DBeaver](./dbeaver/dbeaver-sql-execution-en.webp)
### Use DBeaver to access TDengine Cloud
1. Log in to the TDengine Cloud service, select **Programming** > **Java** in the management console, and then copy the string value of `TDENGINE_JDBC_URL` displayed in the **Config** section.
![Copy JDBC URL from TDengine Cloud](./dbeaver/tdengine-cloud-jdbc-dsn-en.webp)
2. Start the DBeaver application, click the button or menu item to choose **New Database Connection**, and then select **TDengine Cloud** in the **Timeseries** category.
![Connect TDengine Cloud with DBeaver](./dbeaver/dbeaver-connect-tdengine-cloud-en.webp)
3. Configure the TDengine Cloud connection by filling in the JDBC URL value. Click **Test Connection**. If you do not have the TDengine Java connector installed on the local machine, DBeaver will prompt you to download and install it. If the connection is successful, it will be displayed as shown in the following figure. If the connection fails, please check whether the TDengine Cloud service is running properly and whether the JDBC URL is correct.
![Configure the TDengine Cloud connection](./dbeaver/dbeaver-connect-tdengine-cloud-test-en.webp)
4. Use DBeaver to select databases and tables and browse your data stored in TDengine Cloud.
![Browse TDengine Cloud data with DBeaver](./dbeaver/dbeaver-browse-data-cloud-en.webp)
5. You can also manipulate TDengine Cloud data by executing SQL commands.
![Use SQL commands to manipulate TDengine Cloud data in DBeaver](./dbeaver/dbeaver-sql-execution-cloud-en.webp)
......@@ -56,7 +56,7 @@ This error indicates that the client could not connect to the server. Perform th
7. If you are using the Python, Java, Go, Rust, C#, or Node.js connector on Linux to connect to the server, verify that `libtaos.so` is in the `/usr/local/taos/driver` directory and `/usr/local/taos/driver` is in the `LD_LIBRARY_PATH` environment variable.
8. If you are using macOS, verify that `libtaos.dylib` is in the `/usr/local/lib` directory and `/usr/local/lib` is in the `LD_LIBRARY_PATH` environment variable.
8. If you are using macOS, verify that `libtaos.dylib` is in the `/usr/local/lib` directory and `/usr/local/lib` is in the `DYLD_LIBRARY_PATH` environment variable.
9. If you are using Windows, verify that `C:\TDengine\driver\taos.dll` is in the `PATH` environment variable. If possible, move `taos.dll` to the `C:\Windows\System32` directory.
......
......@@ -16,6 +16,20 @@ TDengine 完整的软件包包括服务端(taosd)、应用驱动(taosc)
On Linux, the TDengine Community Edition provides installation packages in Deb and RPM formats; users can choose the appropriate package for their environment. Deb supports Debian/Ubuntu and derivative systems, and RPM supports CentOS/RHEL/SUSE and derivative systems. A tar.gz package is also provided for enterprise users, and installation over the `apt-get` tool is supported as well. Note that the RPM and Deb packages do not include `taosdump` and the TDinsight installation script; these tools are obtained by installing the taosTools package. TDengine also provides installation packages for the Windows x64 platform and the macOS x64/m1 platform.
## Operating environment requirements
On Linux, the minimum requirements for the operating environment are as follows:
- Linux kernel version: 3.10.0-1160.83.1.el7.x86_64
- glibc version: 2.17
If you compile and install from cloned source code, the following are also required:
- cmake version: 3.26.4 or above
- gcc version: 9.3.1 or above
## Installation
......
......@@ -10,10 +10,10 @@ description: "TDengine 内部的缓存设计"
TDengine uses a time-driven cache management policy, first in, first out (FIFO), also known as a write-driven cache management mechanism. This policy differs from read-driven data caching (least recently used, LRU): it keeps the most recently written data in the system cache, and when the cache reaches a threshold, the earliest data is written to disk in batches. Generally speaking, for IoT data, users care most about the most recently generated data, that is, the current state. TDengine takes full advantage of this characteristic by keeping the most recently arrived (current-state) data in the cache.
The write cache size of each vnode is determined when the database is created. Two key parameters, vgroups and buffer, determine how many vgroups process the data in the database and how much write cache is allocated to each vnode.
The write cache size of each vnode is determined when the database is created. Two key parameters, vgroups and buffer, determine how many vgroups process the data in the database and how much write cache is allocated to each vnode. The unit of buffer is MB.
```sql
create database db0 vgroups 100 buffer 16MB
create database db0 vgroups 100 buffer 16
```
In theory, the larger the cache the better; however, beyond a certain threshold, further increasing the cache does not improve write performance. In most cases, the default value is sufficient.
......@@ -28,10 +28,10 @@ create database db0 vgroups 100 buffer 16MB
## Metadata Cache
To process queries and writes more efficiently, each vnode caches the metadata it has previously obtained. The metadata cache is determined by two parameters set at database creation: pages and pagesize.
To process queries and writes more efficiently, each vnode caches the metadata it has previously obtained. The metadata cache is determined by two parameters set at database creation: pages and pagesize. The unit of pagesize is KB.
```sql
create database db0 pages 128 pagesize 16kb
create database db0 pages 128 pagesize 16
```
The preceding statement creates 128 pages of metadata cache for each vnode of database db0, 16 KB per page, that is, 2 MB per vnode.
......
......@@ -291,6 +291,7 @@ url 中的配置参数如下:
- httpSocketTimeout: socket timeout in ms; the default value is 5000. It only takes effect when batchfetch is set to false.
- messageWaitTimeout: message transmission timeout in ms; the default value is 3000. It only takes effect when batchfetch is set to true.
- useSSL: whether SSL is used for the connection.
- httpPoolSize: maximum number of concurrent REST requests; the default is 20.
**Note**: Some configuration items (e.g., locale, timezone) do not take effect in the REST connection.
......@@ -358,6 +359,7 @@ properties 中的配置参数如下:
- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket timeout in ms; the default value is 5000. It only takes effect when using a REST connection and batchfetch is set to false.
- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: message transmission timeout in ms; the default value is 3000. It only takes effect when using a REST connection and batchfetch is set to true.
- TSDBDriver.PROPERTY_KEY_USE_SSL: whether SSL is used for the connection. It only takes effect when using a REST connection.
- TSDBDriver.HTTP_POOL_SIZE: maximum number of concurrent REST requests; the default is 20.
In addition, for JDBC native connections, other parameters, such as log level and SQL length, can be specified via the URL and Properties. For more detailed configuration, please refer to [Client Configuration](/reference/config/#仅客户端适用).
### Priority of configuration parameters
......@@ -422,6 +424,19 @@ while(resultSet.next()){
> Queries work the same way as with a relational database; when using a subscript to get the content of a returned field, start from 1. We recommend using field names instead.
### Execute SQL with reqId
The reqId can be used for request tracing.
```java
AbstractStatement aStmt = (AbstractStatement) connection.createStatement();
aStmt.execute("create database if not exists db", 1L);
aStmt.executeUpdate("use db", 2L);
try (ResultSet rs = aStmt.executeQuery("select * from tb", 3L)) {
Timestamp ts = rs.getTimestamp(1);
}
```
### Writing data via parameter binding
TDengine's JDBC native connection has significantly improved parameter-binding support for data writing (INSERT) scenarios. Writing data this way avoids the resource consumption of SQL syntax parsing and can significantly improve write performance in many cases.
......@@ -939,6 +954,14 @@ public class SchemalessWsTest {
</TabItem>
</Tabs>
### Schemaless writing with reqId
The reqId can be used for request tracing.
```java
writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS, 1L);
```
### Data Subscription
The TDengine Java connector supports the subscription feature with the following application API:
......@@ -996,7 +1019,7 @@ while(true) {
#### Specify subscription offset
```
```java
long position(TopicPartition partition) throws SQLException;
Map<TopicPartition, Long> position(String topic) throws SQLException;
Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
......@@ -1005,6 +1028,29 @@ Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;
void seek(TopicPartition partition, long offset) throws SQLException;
```
Example code:
```java
String topic = "offset_seek_test";
Map<TopicPartition, Long> offset = null;
try (TaosConsumer<ResultBean> consumer = new TaosConsumer<>(properties)) {
consumer.subscribe(Collections.singletonList(topic));
for (int i = 0; i < 10; i++) {
if (i == 3) {
// Saving consumption position
offset = consumer.position(topic);
}
if (i == 5) {
// reset consumption to the previously saved position
for (Map.Entry<TopicPartition, Long> entry : offset.entrySet()) {
consumer.seek(entry.getKey(), entry.getValue());
}
}
ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(500));
}
}
```
#### Close subscriptions
```java
......
......@@ -45,9 +45,8 @@ TDengine 版本更新往往会增加新的功能特性,列表中的连接器
| **Connection Management** | Support | Support | Support | Support | Support | Support |
| **Regular Query** | Support | Support | Support | Support | Support | Support |
| **Parameter Binding** | Support | Support | Support | Support | Support | Support |
| **Subscription (TMQ)** | Not Supported | Support | Support | Support | Support | Support |
| **Subscription (TMQ)** | Support | Support | Support | Support | Support | Support |
| **Schemaless** | Support | Support | Support | Support | Support | Support |
| **DataFrame** | Not Supported | Support | Not Supported | Not Supported | Not Supported | Not Supported |
:::info
Because database framework specifications differ across programming languages, it does not follow that every C/C++ interface needs a corresponding wrapper.
......@@ -59,11 +58,10 @@ TDengine 版本更新往往会增加新的功能特性,列表中的连接器
| ------------------------------ | -------- | ---------- | -------- | -------- | ----------- | -------- |
| **Connection Management** | Support | Support | Support | Support | Support | Support |
| **Regular Query** | Support | Support | Support | Support | Support | Support |
| **Parameter Binding** | Not Supported | Not Supported | Support | Support | Not Supported | Support |
| **Parameter Binding** | Support | Not Supported | Support | Support | Not Supported | Support |
| **Subscription (TMQ)** | Support | Support | Support | Not Supported | Not Supported | Support |
| **Schemaless** | Support | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported |
| **Schemaless** | Support | Not Supported | Support | Not Supported | Not Supported | Support |
| **Bulk Pulling (based on WebSocket)** | Support | Support | Support | Support | Support | Support |
| **DataFrame** | Not Supported | Support | Not Supported | Not Supported | Not Supported | Not Supported |
:::warning
......
......@@ -81,7 +81,7 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
| 3 | ntables | INT | Number of tables in the database, including subtables and normal tables but not supertables |
| 4 | vgroups | INT | Number of vgroups in the database. Note that `vgroups` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 6 | replica | INT | Number of replicas. Note that `replica` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 7 | strict | BINARY(3) | Strong consistency. Note that `strict` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 7 | strict | BINARY(4) | Deprecated |
| 8 | duration | INT | Time span of data stored in a single file. Note that `duration` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 9 | keep | INT | Data retention period. Note that `keep` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 10 | buffer | INT | Size of each vnode's write cache in memory, in MB. Note that `buffer` is a TDengine keyword and must be escaped with ` when used as a column name. |
......
......@@ -19,16 +19,20 @@ taosd -C
## Configuration File on Client Side
TDengine's front-end interactive client application is taos, together with the application driver; it can share the same configuration file taos.cfg with taosd, or use a separately specified configuration file. When running taos, use the -c parameter to specify the configuration file directory, e.g. taos -c /home/cfg means the parameters in the taos.cfg file under the /home/cfg/ directory are used; the default directory is /etc/taos. For more usage of taos, see the help information `taos --help`
TDengine's front-end interactive client application is taos, together with the application driver; it can share the same configuration file taos.cfg with taosd, or use a separately specified configuration file. When running taos, use the -c parameter to specify the configuration file directory, for example
```bash
taos -C
```
```bash
taos -c /home/cfg
```
means the parameters in the taos.cfg file under the /home/cfg/ directory are used; the default directory is /etc/taos. In addition, `-C` can be used to display the current server configuration parameters:
```bash
taos --dump-config
taos -C
```
For more usage of taos, see the help information `taos --help`
## Detailed List of Configuration Parameters
:::note
......@@ -139,6 +143,8 @@ taos --dump-config
:::note
Please note that the full monitoring feature requires the `taoskeeper` service to be installed and running; taoskeeper receives the monitoring metric data and creates the `log` database.
:::
### monitor
| Attribute | Description |
......
---
sidebar_label: DBeaver
title: DBeaver
description: A detailed guide to accessing TDengine data with DBeaver
---
DBeaver is a popular cross-platform database management tool that makes it easy for developers, database administrators, data analysts, and other users to manage data. Starting from version 23.1.1, DBeaver has built-in support for TDengine, covering both independently deployed TDengine clusters and TDengine Cloud.
## Prerequisites
### Install DBeaver
Using DBeaver to manage TDengine requires the following preparations.
- Install DBeaver. DBeaver supports mainstream operating systems including Windows, macOS, and Linux. Please make sure to [download](https://dbeaver.io/download/) the installation package for the correct platform and version (23.1.1+). For detailed installation steps, refer to the [DBeaver official documentation](https://github.com/dbeaver/dbeaver/wiki/Installation).
- If you use an independently deployed TDengine cluster, make sure TDengine is running normally and that taosAdapter has been installed and is running properly. For details, refer to the [taosAdapter user manual](/reference/taosadapter).
- If you use TDengine Cloud, please [register](https://cloud.taosdata.com/) for an account.
## Usage
### Use DBeaver to access an on-premises TDengine cluster
1. Start the DBeaver application, click the button or menu item to choose "Connect to a database", and then select TDengine in the timeseries category.
![Connect TDengine with DBeaver](./dbeaver/dbeaver-connect-tdengine-zh.webp)
2. Configure the TDengine connection by filling in the host address, port number, username, and password. If TDengine is deployed on the local machine, you can fill in only the username and password; the default username is root and the default password is taosdata. Click "Test Connection" to check whether the connection is usable. If the TDengine Java connector is not installed on the local machine, DBeaver will prompt you to download and install it.
![Configure the TDengine connection](./dbeaver/dbeaver-config-tdengine-zh.webp)
3. If the connection succeeds, it is displayed as shown in the figure below. If the connection fails, check whether the TDengine service and taosAdapter are running correctly and whether the host address, port number, username, and password are correct.
![Connection successful](./dbeaver/dbeaver-connect-tdengine-test-zh.webp)
4. Use DBeaver to select databases and tables to browse the data on the TDengine server.
![Browse TDengine data with DBeaver](./dbeaver/dbeaver-browse-data-zh.webp)
5. You can also manipulate TDengine data by executing SQL commands.
![DBeaver SQL commands](./dbeaver/dbeaver-sql-execution-zh.webp)
### Use DBeaver to access TDengine Cloud
1. Log in to the TDengine Cloud service, select "Programming" and "Java" in the management console, and then copy the string value of TDENGINE_JDBC_URL.
![Copy the TDengine Cloud DSN](./dbeaver/tdengine-cloud-jdbc-dsn-zh.webp)
2. Start the DBeaver application, click the button or menu item to choose "Connect to a database", and then select TDengine Cloud in the timeseries category.
![Connect TDengine Cloud with DBeaver](./dbeaver/dbeaver-connect-tdengine-cloud-zh.webp)
3. Configure the TDengine Cloud connection by filling in the JDBC_URL value. Click "Test Connection"; if the TDengine Java connector is not installed on the local machine, DBeaver will prompt you to download and install it. If the connection succeeds, it is displayed as shown in the figure below. If the connection fails, check whether the TDengine Cloud service is started and whether the JDBC_URL is correct.
![Configure the TDengine Cloud connection](./dbeaver/dbeaver-connect-tdengine-cloud-test-zh.webp)
4. Use DBeaver to select databases and tables to browse the data of the TDengine Cloud service.
![Browse TDengine Cloud data with DBeaver](./dbeaver/dbeaver-browse-cloud-data-zh.webp)
5. You can also manipulate TDengine Cloud data by executing SQL commands.
![Use SQL commands to manipulate TDengine Cloud data in DBeaver](./dbeaver/dbeaver-sql-execution-cloud-zh.webp)
......@@ -165,9 +165,7 @@ Vnode 会保持一个数据版本号(version),对内存数据进行持久
### Synchronous Replication
For scenarios with stricter data consistency requirements, the eventual consistency provided by asynchronous replication cannot meet the requirements, so TDengine provides a synchronous replication mechanism for users to choose. When creating a database, in addition to specifying the number of replicas `replica`, the user must also specify a new parameter `strict`. If `strict` equals 1, each time the leader forwards data to the replicas, it must wait for more than half of the replicas to reach agreement before notifying the application that the data has been written successfully on the followers. If confirmation from more than half of the replicas is not obtained within a certain time, the leader vnode returns an error to the application.
With synchronous replication, system performance decreases and latency increases. Because metadata must be strongly consistent, data synchronization between mnodes uses synchronous replication by default.
For scenarios with stricter data consistency requirements, the eventual consistency provided by asynchronous replication cannot meet the requirements. Therefore, TDengine 3.0 uses a synchronous replication mechanism (following the standard implementation of the RAFT protocol). Each time the leader vnode forwards data to the other replicas, it must wait for more than half of the replicas (including itself) to reach agreement before notifying the application that the write succeeded. If confirmation from more than half of the replicas is not obtained within a certain time, the leader vnode returns an error to the application.
## Caching and Persistence
......
......@@ -3386,7 +3386,6 @@ typedef struct {
int8_t reserved;
} SMqHbRsp;
#define TD_AUTO_CREATE_TABLE 0x1
typedef struct {
int64_t suid;
......
......@@ -216,7 +216,7 @@ bool fmIsUserDefinedFunc(int32_t funcId);
bool fmIsDistExecFunc(int32_t funcId);
bool fmIsForbidFillFunc(int32_t funcId);
bool fmIsForbidStreamFunc(int32_t funcId);
bool fmIsForbidSuperTableFunc(int32_t funcId);
bool fmIsForbidSysTableFunc(int32_t funcId);
bool fmIsIntervalInterpoFunc(int32_t funcId);
bool fmIsInterpFunc(int32_t funcId);
bool fmIsLastRowFunc(int32_t funcId);
......
......@@ -214,7 +214,6 @@ int32_t walSkipFetchBody(SWalReader *pRead, const SWalCkHead *pHead);
void walRefFirstVer(SWal *, SWalRef *);
void walRefLastVer(SWal *, SWalRef *);
//void walRefCommitVer(SWal *, SWalRef *);
SWalRef *walOpenRef(SWal *);
void walCloseRef(SWal *pWal, int64_t refId);
......
......@@ -704,6 +704,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_PAR_INVALID_TAGS_PC TAOS_DEF_ERROR_CODE(0, 0x2665)
#define TSDB_CODE_PAR_INVALID_TIMELINE_QUERY TAOS_DEF_ERROR_CODE(0, 0x2666)
#define TSDB_CODE_PAR_INVALID_OPTR_USAGE TAOS_DEF_ERROR_CODE(0, 0x2667)
#define TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED_FUNC TAOS_DEF_ERROR_CODE(0, 0x2668)
#define TSDB_CODE_PAR_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x26FF)
//planner
......
......@@ -43,7 +43,7 @@ typedef struct SMultiwayMergeTreeInfo {
int32_t tMergeTreeCreate(SMultiwayMergeTreeInfo **pTree, uint32_t numOfEntries, void *param,
__merge_compare_fn_t compareFn);
void tMergeTreeDestroy(SMultiwayMergeTreeInfo *pTree);
void tMergeTreeDestroy(SMultiwayMergeTreeInfo **pTree);
void tMergeTreeAdjust(SMultiwayMergeTreeInfo *pTree, int32_t idx);
......
......@@ -24,7 +24,8 @@ extern "C" {
typedef struct SLRUCache SLRUCache;
typedef void (*_taos_lru_deleter_t)(const void *key, size_t keyLen, void *value);
typedef void (*_taos_lru_deleter_t)(const void *key, size_t keyLen, void *value, void *ud);
typedef int (*_taos_lru_functor_t)(const void *key, size_t keyLen, void *value, void *ud);
typedef struct LRUHandle LRUHandle;
......@@ -41,10 +42,11 @@ SLRUCache *taosLRUCacheInit(size_t capacity, int numShardBits, double highPriPoo
void taosLRUCacheCleanup(SLRUCache *cache);
LRUStatus taosLRUCacheInsert(SLRUCache *cache, const void *key, size_t keyLen, void *value, size_t charge,
_taos_lru_deleter_t deleter, LRUHandle **handle, LRUPriority priority);
_taos_lru_deleter_t deleter, LRUHandle **handle, LRUPriority priority, void *ud);
LRUHandle *taosLRUCacheLookup(SLRUCache *cache, const void *key, size_t keyLen);
void taosLRUCacheErase(SLRUCache *cache, const void *key, size_t keyLen);
void taosLRUCacheApply(SLRUCache *cache, _taos_lru_functor_t functor, void *ud);
void taosLRUCacheEraseUnrefEntries(SLRUCache *cache);
bool taosLRUCacheRef(SLRUCache *cache, LRUHandle *handle);
......
......@@ -1788,6 +1788,7 @@ static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, i
len += lenTmp;
pStart += lenTmp;
int32_t estimateColLen = 0;
for (int32_t j = 0; j < numOfRows; ++j) {
if (offset[j] == -1) {
continue;
......@@ -1797,20 +1798,21 @@ static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, i
int32_t jsonInnerType = *data;
char* jsonInnerData = data + CHAR_BYTES;
if (jsonInnerType == TSDB_DATA_TYPE_NULL) {
len += (VARSTR_HEADER_SIZE + strlen(TSDB_DATA_NULL_STR_L));
estimateColLen += (VARSTR_HEADER_SIZE + strlen(TSDB_DATA_NULL_STR_L));
} else if (tTagIsJson(data)) {
len += (VARSTR_HEADER_SIZE + ((const STag*)(data))->len);
estimateColLen += (VARSTR_HEADER_SIZE + ((const STag*)(data))->len);
} else if (jsonInnerType == TSDB_DATA_TYPE_NCHAR) { // value -> "value"
len += varDataTLen(jsonInnerData) + CHAR_BYTES * 2;
estimateColLen += varDataTLen(jsonInnerData) + CHAR_BYTES * 2;
} else if (jsonInnerType == TSDB_DATA_TYPE_DOUBLE) {
len += (VARSTR_HEADER_SIZE + 32);
estimateColLen += (VARSTR_HEADER_SIZE + 32);
} else if (jsonInnerType == TSDB_DATA_TYPE_BOOL) {
len += (VARSTR_HEADER_SIZE + 5);
estimateColLen += (VARSTR_HEADER_SIZE + 5);
} else {
tscError("estimateJsonLen error: invalid type:%d", jsonInnerType);
return -1;
}
}
len += TMAX(colLen, estimateColLen);
} else if (IS_VAR_DATA_TYPE(pResultInfo->fields[i].type)) {
int32_t lenTmp = numOfRows * sizeof(int32_t);
len += (lenTmp + colLen);
......
......@@ -202,7 +202,7 @@ static int32_t smlParseTagKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLin
bool keyEscaped = false;
size_t keyLenEscaped = 0;
while (*sql < sqlEnd) {
if (unlikely(IS_COMMA(*sql))) {
if (unlikely(IS_SPACE(*sql) || IS_COMMA(*sql))) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid data", *sql);
return TSDB_CODE_SML_INVALID_DATA;
}
......@@ -410,7 +410,7 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLin
bool keyEscaped = false;
size_t keyLenEscaped = 0;
while (*sql < sqlEnd) {
if (unlikely(IS_COMMA(*sql))) {
if (unlikely(IS_SPACE(*sql) || IS_COMMA(*sql))) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid data", *sql);
return TSDB_CODE_SML_INVALID_DATA;
}
......@@ -436,19 +436,20 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLin
size_t valueLen = 0;
bool valueEscaped = false;
size_t valueLenEscaped = 0;
bool isInQuote = false;
int quoteNum = 0;
const char *escapeChar = NULL;
while (*sql < sqlEnd) {
// parse value
if (unlikely(*(*sql) == QUOTE && (*(*sql - 1) != SLASH || (*sql - 1) == escapeChar))) {
isInQuote = !isInQuote;
quoteNum++;
(*sql)++;
continue;
}
if (!isInQuote) {
if (unlikely(IS_SPACE(*sql) || IS_COMMA(*sql))) {
if(quoteNum > 2){
break;
}
continue;
}
if (quoteNum % 2 == 0 && (unlikely(IS_SPACE(*sql) || IS_COMMA(*sql)))) {
break;
}
if (IS_SLASH_LETTER_IN_FIELD_VALUE(*sql) && (*sql - 1) != escapeChar) {
escapeChar = *sql;
......@@ -460,8 +461,8 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLin
}
valueLen = *sql - value;
if (unlikely(isInQuote)) {
smlBuildInvalidDataMsg(&info->msgBuf, "only one quote", value);
if (unlikely(quoteNum != 0 && quoteNum != 2)) {
smlBuildInvalidDataMsg(&info->msgBuf, "unbalanced quotes", value);
return TSDB_CODE_SML_INVALID_DATA;
}
if (unlikely(valueLen == 0)) {
......
......@@ -224,6 +224,8 @@ TEST(testCase, smlParseCols_Error_Test) {
"st,tt=aa c 1=2 1626006833639000000,",
//field value double quote,slash
"st,tt=aa c=\"a\"a\" 1626006833639000000,",
"escape_test,tag1=\"tag1_value\",tag2=\"tag2_value\" co l0=\"col0_value\",col1=\"col1_value\" 1680918783010000000",
"escape_test,tag1=\"tag1_value\",tag2=\"tag2_value\" col0=\"co\"l\"0_value\",col1=\"col1_value\" 1680918783010000000"
};
SSmlHandle *info = smlBuildSmlInfo(NULL);
......
......@@ -1713,7 +1713,8 @@ static int32_t colDataMoveVarData(SColumnInfoData* pColInfoData, size_t start, s
static void colDataTrimFirstNRows(SColumnInfoData* pColInfoData, size_t n, size_t total) {
if (IS_VAR_DATA_TYPE(pColInfoData->info.type)) {
pColInfoData->varmeta.length = colDataMoveVarData(pColInfoData, n, total);
// pColInfoData->varmeta.length = colDataMoveVarData(pColInfoData, n, total);
memmove(pColInfoData->varmeta.offset, &pColInfoData->varmeta.offset[n], (total - n) * sizeof(int32_t));
// clear the offset value of the unused entries.
memset(&pColInfoData->varmeta.offset[total - n], 0, n);
......@@ -1745,7 +1746,7 @@ int32_t blockDataTrimFirstRows(SSDataBlock* pBlock, size_t n) {
static void colDataKeepFirstNRows(SColumnInfoData* pColInfoData, size_t n, size_t total) {
if (IS_VAR_DATA_TYPE(pColInfoData->info.type)) {
pColInfoData->varmeta.length = colDataMoveVarData(pColInfoData, 0, n);
// pColInfoData->varmeta.length = colDataMoveVarData(pColInfoData, 0, n);
memset(&pColInfoData->varmeta.offset[n], 0, total - n);
} else { // reset the bitmap value
/*int32_t stopIndex = BitmapLen(n) * 8;
......
......@@ -468,6 +468,7 @@ typedef struct {
int8_t replica;
int16_t numOfColumns;
int32_t numOfRows;
int32_t curIterPackedRows;
void* pIter;
SMnode* pMnode;
STableMetaRsp* pMeta;
......@@ -606,25 +607,25 @@ void tDeleteSubscribeObj(SMqSubscribeObj* pSub);
int32_t tEncodeSubscribeObj(void** buf, const SMqSubscribeObj* pSub);
void* tDecodeSubscribeObj(const void* buf, SMqSubscribeObj* pSub, int8_t sver);
typedef struct {
int32_t epoch;
SArray* consumers; // SArray<SMqConsumerEp*>
} SMqSubActionLogEntry;
SMqSubActionLogEntry* tCloneSMqSubActionLogEntry(SMqSubActionLogEntry* pEntry);
void tDeleteSMqSubActionLogEntry(SMqSubActionLogEntry* pEntry);
int32_t tEncodeSMqSubActionLogEntry(void** buf, const SMqSubActionLogEntry* pEntry);
void* tDecodeSMqSubActionLogEntry(const void* buf, SMqSubActionLogEntry* pEntry);
typedef struct {
char key[TSDB_SUBSCRIBE_KEY_LEN];
SArray* logs; // SArray<SMqSubActionLogEntry*>
} SMqSubActionLogObj;
SMqSubActionLogObj* tCloneSMqSubActionLogObj(SMqSubActionLogObj* pLog);
void tDeleteSMqSubActionLogObj(SMqSubActionLogObj* pLog);
int32_t tEncodeSMqSubActionLogObj(void** buf, const SMqSubActionLogObj* pLog);
void* tDecodeSMqSubActionLogObj(const void* buf, SMqSubActionLogObj* pLog);
//typedef struct {
// int32_t epoch;
// SArray* consumers; // SArray<SMqConsumerEp*>
//} SMqSubActionLogEntry;
//SMqSubActionLogEntry* tCloneSMqSubActionLogEntry(SMqSubActionLogEntry* pEntry);
//void tDeleteSMqSubActionLogEntry(SMqSubActionLogEntry* pEntry);
//int32_t tEncodeSMqSubActionLogEntry(void** buf, const SMqSubActionLogEntry* pEntry);
//void* tDecodeSMqSubActionLogEntry(const void* buf, SMqSubActionLogEntry* pEntry);
//
//typedef struct {
// char key[TSDB_SUBSCRIBE_KEY_LEN];
// SArray* logs; // SArray<SMqSubActionLogEntry*>
//} SMqSubActionLogObj;
//
//SMqSubActionLogObj* tCloneSMqSubActionLogObj(SMqSubActionLogObj* pLog);
//void tDeleteSMqSubActionLogObj(SMqSubActionLogObj* pLog);
//int32_t tEncodeSMqSubActionLogObj(void** buf, const SMqSubActionLogObj* pLog);
//void* tDecodeSMqSubActionLogObj(const void* buf, SMqSubActionLogObj* pLog);
typedef struct {
int32_t oldConsumerNum;
......@@ -643,7 +644,7 @@ typedef struct {
SArray* removedConsumers; // SArray<int64_t>
SArray* modifyConsumers; // SArray<int64_t>
SMqSubscribeObj* pSub;
SMqSubActionLogEntry* pLogEntry;
// SMqSubActionLogEntry* pLogEntry;
} SMqRebOutputObj;
typedef struct {
......
......@@ -765,107 +765,129 @@ static int32_t mndRetrieveConns(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl
return numOfRows;
}
static int32_t mndRetrieveQueries(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows) {
SMnode *pMnode = pReq->info.node;
SSdb *pSdb = pMnode->pSdb;
int32_t numOfRows = 0;
int32_t cols = 0;
SConnObj *pConn = NULL;
if (pShow->pIter == NULL) {
SProfileMgmt *pMgmt = &pMnode->profileMgmt;
pShow->pIter = taosCacheCreateIter(pMgmt->connCache);
/**
 * @param pConn the connection whose queries are packed
 * @param[out] pBlock the block into which the data is packed
 * @param offset skip the first [offset] queries in pConn
 * @param rowsToPack maximum number of rows to pack
 * @return the number of rows packed
*/
static int32_t packQueriesIntoBlock(SShowObj* pShow, SConnObj* pConn, SSDataBlock* pBlock, uint32_t offset, uint32_t rowsToPack) {
int32_t cols = 0;
taosRLockLatch(&pConn->queryLock);
int32_t numOfQueries = taosArrayGetSize(pConn->pQueries);
if (NULL == pConn->pQueries || numOfQueries <= offset) {
taosRUnLockLatch(&pConn->queryLock);
return 0;
}
while (numOfRows < rows) {
pConn = mndGetNextConn(pMnode, pShow->pIter);
if (pConn == NULL) {
pShow->pIter = NULL;
break;
}
int32_t i = offset;
for (; i < numOfQueries && (i - offset) < rowsToPack; ++i) {
int32_t curRowIndex = pBlock->info.rows;
SQueryDesc *pQuery = taosArrayGet(pConn->pQueries, i);
cols = 0;
taosRLockLatch(&pConn->queryLock);
if (NULL == pConn->pQueries || taosArrayGetSize(pConn->pQueries) <= 0) {
taosRUnLockLatch(&pConn->queryLock);
continue;
}
char queryId[26 + VARSTR_HEADER_SIZE] = {0};
sprintf(&queryId[VARSTR_HEADER_SIZE], "%x:%" PRIx64, pConn->id, pQuery->reqRid);
varDataLen(queryId) = strlen(&queryId[VARSTR_HEADER_SIZE]);
SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, curRowIndex, (const char *)queryId, false);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, curRowIndex, (const char *)&pQuery->queryId, false);
int32_t numOfQueries = taosArrayGetSize(pConn->pQueries);
for (int32_t i = 0; i < numOfQueries && numOfRows < rows; ++i) {
SQueryDesc *pQuery = taosArrayGet(pConn->pQueries, i);
cols = 0;
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, curRowIndex, (const char *)&pConn->id, false);
char queryId[26 + VARSTR_HEADER_SIZE] = {0};
sprintf(&queryId[VARSTR_HEADER_SIZE], "%x:%" PRIx64, pConn->id, pQuery->reqRid);
varDataLen(queryId) = strlen(&queryId[VARSTR_HEADER_SIZE]);
SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)queryId, false);
char app[TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE];
STR_TO_VARSTR(app, pConn->app);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, curRowIndex, (const char *)app, false);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)&pQuery->queryId, false);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, curRowIndex, (const char *)&pConn->pid, false);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)&pConn->id, false);
char user[TSDB_USER_LEN + VARSTR_HEADER_SIZE] = {0};
STR_TO_VARSTR(user, pConn->user);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, curRowIndex, (const char *)user, false);
char app[TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE];
STR_TO_VARSTR(app, pConn->app);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)app, false);
char endpoint[TSDB_IPv4ADDR_LEN + 6 + VARSTR_HEADER_SIZE] = {0};
sprintf(&endpoint[VARSTR_HEADER_SIZE], "%s:%d", taosIpStr(pConn->ip), pConn->port);
varDataLen(endpoint) = strlen(&endpoint[VARSTR_HEADER_SIZE]);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, curRowIndex, (const char *)endpoint, false);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)&pConn->pid, false);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, curRowIndex, (const char *)&pQuery->stime, false);
char user[TSDB_USER_LEN + VARSTR_HEADER_SIZE] = {0};
STR_TO_VARSTR(user, pConn->user);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)user, false);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, curRowIndex, (const char *)&pQuery->useconds, false);
char endpoint[TSDB_IPv4ADDR_LEN + 6 + VARSTR_HEADER_SIZE] = {0};
sprintf(&endpoint[VARSTR_HEADER_SIZE], "%s:%d", taosIpStr(pConn->ip), pConn->port);
varDataLen(endpoint) = strlen(&endpoint[VARSTR_HEADER_SIZE]);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)endpoint, false);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, curRowIndex, (const char *)&pQuery->stableQuery, false);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)&pQuery->stime, false);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, curRowIndex, (const char *)&pQuery->subPlanNum, false);
char subStatus[TSDB_SHOW_SUBQUERY_LEN + VARSTR_HEADER_SIZE] = {0};
int32_t strSize = sizeof(subStatus);
int32_t offset = VARSTR_HEADER_SIZE;
for (int32_t i = 0; i < pQuery->subPlanNum && offset < strSize; ++i) {
if (i) {
offset += snprintf(subStatus + offset, strSize - offset - 1, ",");
}
SQuerySubDesc *pDesc = taosArrayGet(pQuery->subDesc, i);
offset += snprintf(subStatus + offset, strSize - offset - 1, "%" PRIu64 ":%s", pDesc->tid, pDesc->status);
}
varDataLen(subStatus) = strlen(&subStatus[VARSTR_HEADER_SIZE]);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, curRowIndex, subStatus, false);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)&pQuery->useconds, false);
char sql[TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE] = {0};
STR_TO_VARSTR(sql, pQuery->sql);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, curRowIndex, (const char *)sql, false);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)&pQuery->stableQuery, false);
pBlock->info.rows++;
}
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)&pQuery->isSubQuery, false);
taosRUnLockLatch(&pConn->queryLock);
return i - offset;
}
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)&pQuery->subPlanNum, false);
static int32_t mndRetrieveQueries(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows) {
SMnode * pMnode = pReq->info.node;
SSdb * pSdb = pMnode->pSdb;
int32_t numOfRows = 0;
SConnObj *pConn = NULL;
char subStatus[TSDB_SHOW_SUBQUERY_LEN + VARSTR_HEADER_SIZE] = {0};
int32_t strSize = sizeof(subStatus);
int32_t offset = VARSTR_HEADER_SIZE;
for (int32_t i = 0; i < pQuery->subPlanNum && offset < strSize; ++i) {
if (i) {
offset += snprintf(subStatus + offset, strSize - offset - 1, ",");
}
SQuerySubDesc *pDesc = taosArrayGet(pQuery->subDesc, i);
offset += snprintf(subStatus + offset, strSize - offset - 1, "%" PRIu64 ":%s", pDesc->tid, pDesc->status);
}
varDataLen(subStatus) = strlen(&subStatus[VARSTR_HEADER_SIZE]);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, subStatus, false);
if (pShow->pIter == NULL) {
SProfileMgmt *pMgmt = &pMnode->profileMgmt;
pShow->pIter = taosCacheCreateIter(pMgmt->connCache);
}
char sql[TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE] = {0};
STR_TO_VARSTR(sql, pQuery->sql);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)sql, false);
// means fetched some data last time for this conn
if (pShow->curIterPackedRows > 0) {
size_t len = 0;
pConn = taosCacheIterGetData(pShow->pIter, &len);
if (pConn && (taosArrayGetSize(pConn->pQueries) > pShow->curIterPackedRows)) {
numOfRows = packQueriesIntoBlock(pShow, pConn, pBlock, pShow->curIterPackedRows, rows);
pShow->curIterPackedRows += numOfRows;
}
}
numOfRows++;
while (numOfRows < rows) {
pConn = mndGetNextConn(pMnode, pShow->pIter);
if (pConn == NULL) {
pShow->pIter = NULL;
break;
}
taosRUnLockLatch(&pConn->queryLock);
int32_t packedRows = packQueriesIntoBlock(pShow, pConn, pBlock, 0, rows - numOfRows);
pShow->curIterPackedRows = packedRows;
numOfRows += packedRows;
}
pShow->numOfRows += numOfRows;
return numOfRows;
}
......
......@@ -81,54 +81,50 @@ IF (TD_VNODE_PLUGINS)
)
ENDIF ()
# IF (NOT ${TD_LINUX})
#     target_include_directories(
#         vnode
#         PUBLIC "inc"
#         PUBLIC "src/inc"
#         PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar"
#         PUBLIC "${TD_SOURCE_DIR}/contrib/rocksdb/include"
#     )
# ELSE()
#     target_include_directories(
#         vnode
#         PUBLIC "inc"
#         PUBLIC "src/inc"
#         PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar"
#     )
# ENDIF(NOT ${TD_LINUX})

if (${BUILD_CONTRIB})
    target_include_directories(
        vnode
        PUBLIC "inc"
        PUBLIC "src/inc"
        PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar"
        PUBLIC "${TD_SOURCE_DIR}/contrib/rocksdb/include"
    )
else()
    target_include_directories(
        vnode
        PUBLIC "inc"
        PUBLIC "src/inc"
        PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar"
    )
    if (${TD_LINUX})
        target_include_directories(
            vnode
            PUBLIC "${TD_SOURCE_DIR}/deps/${TD_DEPS_DIR}/rocksdb_static"
        )
        target_link_directories(
            vnode
            PUBLIC "${TD_SOURCE_DIR}/deps/${TD_DEPS_DIR}/rocksdb_static"
        )
    endif()
endif()

target_link_libraries(
    vnode
    PUBLIC os
    PUBLIC util
    PUBLIC common
    PUBLIC tfs
    PUBLIC wal
    PUBLIC qworker
    PUBLIC sync
    PUBLIC executor
    PUBLIC scheduler
    PUBLIC tdb

    # PUBLIC bdb
    # PUBLIC scalar
    PUBLIC rocksdb
    PUBLIC transport
    PUBLIC stream
    PUBLIC index
)
IF (TD_GRANT)
TARGET_LINK_LIBRARIES(vnode PUBLIC grant)
......
......@@ -357,19 +357,25 @@ typedef struct {
STSchema *pTSchema;
} SRocksCache;
typedef struct {
STsdb *pTsdb;
int flush_count;
} SCacheFlushState;
struct STsdb {
char *path;
SVnode *pVnode;
STsdbKeepCfg keepCfg;
TdThreadRwlock rwLock;
SMemTable *mem;
SMemTable *imem;
STsdbFS fs;
SLRUCache *lruCache;
SCacheFlushState flushState;
TdThreadMutex lruMutex;
SLRUCache *biCache;
TdThreadMutex biMutex;
SRocksCache rCache;
};
struct TSDBKEY {
......
......@@ -151,7 +151,6 @@ int32_t metaCacheOpen(SMeta* pMeta) {
taosHashSetFreeFp(pCache->sTagFilterResCache.pTableEntry, freeCacheEntryFp);
taosThreadMutexInit(&pCache->sTagFilterResCache.lock, NULL);
pCache->STbGroupResCache.pResCache = taosLRUCacheInit(5 * 1024 * 1024, -1, 0.5);
if (pCache->STbGroupResCache.pResCache == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
......@@ -169,7 +168,6 @@ int32_t metaCacheOpen(SMeta* pMeta) {
taosHashSetFreeFp(pCache->STbGroupResCache.pTableEntry, freeCacheEntryFp);
taosThreadMutexInit(&pCache->STbGroupResCache.lock, NULL);
pMeta->pCache = pCache;
return code;
......@@ -486,14 +484,14 @@ static int checkAllEntriesInCache(const STagFilterResEntry* pEntry, SArray* pInv
}
static FORCE_INLINE void setMD5DigestInKey(uint64_t* pBuf, const char* key, int32_t keyLen) {
  // ASSERT(keyLen == sizeof(int64_t) * 2);
memcpy(&pBuf[2], key, keyLen);
}
// the format of key:
// hash table address(8bytes) + suid(8bytes) + MD5 digest(16bytes)
static void initCacheKey(uint64_t* buf, const SHashObj* pHashMap, uint64_t suid, const char* key, int32_t keyLen) {
  buf[0] = (uint64_t)pHashMap;
buf[1] = suid;
setMD5DigestInKey(buf, key, keyLen);
ASSERT(keyLen == sizeof(uint64_t) * 2);
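As the comment above spells out, the composed LRU key is 32 bytes: the entry hash map's address and the suid fill the first two 8-byte slots, and the 16-byte MD5 digest of the serialized filter fills the rest. A self-contained sketch of that layout (illustrative only, not the internal helper):

```c
#include <stdint.h>
#include <string.h>

// 32-byte composed key: map address (8B) + suid (8B) + MD5 digest (16B).
static void composeKey(uint64_t buf[4], const void* pHashMap, uint64_t suid,
                       const uint8_t digest[16]) {
  buf[0] = (uint64_t)(uintptr_t)pHashMap;  // ties the key to one cache map
  buf[1] = suid;                           // super-table uid
  memcpy(&buf[2], digest, 16);             // MD5 of the filter condition
}
```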
......@@ -501,7 +499,7 @@ static void initCacheKey(uint64_t* buf, const SHashObj* pHashMap, uint64_t suid,
int32_t metaGetCachedTableUidList(void* pVnode, tb_uid_t suid, const uint8_t* pKey, int32_t keyLen, SArray* pList1,
bool* acquireRes) {
  SMeta*  pMeta = ((SVnode*)pVnode)->pMeta;
int32_t vgId = TD_VID(pMeta->pVnode);
// generate the composed key for LRU cache
......@@ -541,7 +539,8 @@ int32_t metaGetCachedTableUidList(void* pVnode, tb_uid_t suid, const uint8_t* pK
uint32_t acc = pMeta->pCache->sTagFilterResCache.accTimes;
if ((*pEntry)->hitTimes % 5000 == 0 && (*pEntry)->hitTimes > 0) {
metaInfo("vgId:%d cache hit:%d, total acc:%d, rate:%.2f", vgId, (*pEntry)->hitTimes, acc, ((double)(*pEntry)->hitTimes) / acc);
metaInfo("vgId:%d cache hit:%d, total acc:%d, rate:%.2f", vgId, (*pEntry)->hitTimes, acc,
((double)(*pEntry)->hitTimes) / acc);
}
taosLRUCacheRelease(pCache, pHandle, false);
......@@ -551,7 +550,8 @@ int32_t metaGetCachedTableUidList(void* pVnode, tb_uid_t suid, const uint8_t* pK
return TSDB_CODE_SUCCESS;
}
static void freeUidCachePayload(const void* key, size_t keyLen, void* value, void* ud) {
(void)ud;
if (value == NULL) {
return;
}
......@@ -607,7 +607,7 @@ static int32_t addNewEntry(SHashObj* pTableEntry, const void* pKey, int32_t keyL
int32_t metaUidFilterCachePut(void* pVnode, uint64_t suid, const void* pKey, int32_t keyLen, void* pPayload,
int32_t payloadLen, double selectivityRatio) {
int32_t code = 0;
  SMeta*  pMeta = ((SVnode*)pVnode)->pMeta;
int32_t vgId = TD_VID(pMeta->pVnode);
if (selectivityRatio > tsSelectivityRatio) {
......@@ -640,7 +640,7 @@ int32_t metaUidFilterCachePut(void* pVnode, uint64_t suid, const void* pKey, int
if (code != TSDB_CODE_SUCCESS) {
goto _end;
}
  } else {  // check if it exists or not
size_t size = listNEles(&(*pEntry)->list);
if (size == 0) {
tdListAppend(&(*pEntry)->list, pKey);
......@@ -659,7 +659,7 @@ int32_t metaUidFilterCachePut(void* pVnode, uint64_t suid, const void* pKey, int
// add to cache.
taosLRUCacheInsert(pCache, key, TAG_FILTER_RES_KEY_LEN, pPayload, payloadLen, freeUidCachePayload, NULL,
                    TAOS_LRU_PRIORITY_LOW, NULL);
_end:
taosThreadMutexUnlock(pLock);
metaDebug("vgId:%d, suid:%" PRIu64 " list cache added into cache, total:%d, tables:%d", vgId, suid,
......@@ -675,7 +675,7 @@ int32_t metaUidCacheClear(SMeta* pMeta, uint64_t suid) {
SHashObj* pEntryHashMap = pMeta->pCache->sTagFilterResCache.pTableEntry;
uint64_t dummy[2] = {0};
  initCacheKey(p, pEntryHashMap, suid, (char*)&dummy[0], 16);
TdThreadMutex* pLock = &pMeta->pCache->sTagFilterResCache.lock;
taosThreadMutexLock(pLock);
......@@ -700,12 +700,12 @@ int32_t metaUidCacheClear(SMeta* pMeta, uint64_t suid) {
tdListEmpty(&(*pEntry)->list);
taosThreadMutexUnlock(pLock);
metaDebug("vgId:%d suid:%"PRId64" cached related tag filter uid list cleared", vgId, suid);
metaDebug("vgId:%d suid:%" PRId64 " cached related tag filter uid list cleared", vgId, suid);
return TSDB_CODE_SUCCESS;
}
int32_t metaGetCachedTbGroup(void* pVnode, tb_uid_t suid, const uint8_t* pKey, int32_t keyLen, SArray** pList) {
  SMeta*  pMeta = ((SVnode*)pVnode)->pMeta;
int32_t vgId = TD_VID(pMeta->pVnode);
// generate the composed key for LRU cache
......@@ -738,7 +738,8 @@ int32_t metaGetCachedTbGroup(void* pVnode, tb_uid_t suid, const uint8_t* pKey, i
uint32_t acc = pMeta->pCache->STbGroupResCache.accTimes;
if ((*pEntry)->hitTimes % 5000 == 0 && (*pEntry)->hitTimes > 0) {
metaInfo("vgId:%d tb group cache hit:%d, total acc:%d, rate:%.2f", vgId, (*pEntry)->hitTimes, acc, ((double)(*pEntry)->hitTimes) / acc);
metaInfo("vgId:%d tb group cache hit:%d, total acc:%d, rate:%.2f", vgId, (*pEntry)->hitTimes, acc,
((double)(*pEntry)->hitTimes) / acc);
}
taosLRUCacheRelease(pCache, pHandle, false);
......@@ -748,8 +749,8 @@ int32_t metaGetCachedTbGroup(void* pVnode, tb_uid_t suid, const uint8_t* pKey, i
return TSDB_CODE_SUCCESS;
}
static void freeTbGroupCachePayload(const void* key, size_t keyLen, void* value, void* ud) {
(void)ud;
if (value == NULL) {
return;
}
......@@ -778,8 +779,8 @@ static void freeTbGroupCachePayload(const void* key, size_t keyLen, void* value)
taosMemoryFree(tmp);
double el = (taosGetTimestampUs() - st) / 1000.0;
metaDebug("clear one item in tb group cache, remain cached item:%d, elapsed time:%.2fms", listNEles(&((*pEntry)->list)),
el);
metaDebug("clear one item in tb group cache, remain cached item:%d, elapsed time:%.2fms",
listNEles(&((*pEntry)->list)), el);
break;
}
}
......@@ -788,11 +789,10 @@ static void freeTbGroupCachePayload(const void* key, size_t keyLen, void* value)
taosArrayDestroy((SArray*)value);
}
int32_t metaPutTbGroupToCache(void* pVnode, uint64_t suid, const void* pKey, int32_t keyLen, void* pPayload,
int32_t payloadLen) {
int32_t code = 0;
  SMeta*  pMeta = ((SVnode*)pVnode)->pMeta;
int32_t vgId = TD_VID(pMeta->pVnode);
if (payloadLen > tsTagFilterResCacheSize) {
......@@ -817,7 +817,7 @@ int32_t metaPutTbGroupToCache(void* pVnode, uint64_t suid, const void* pKey, int
if (code != TSDB_CODE_SUCCESS) {
goto _end;
}
  } else {  // check if it exists or not
size_t size = listNEles(&(*pEntry)->list);
if (size == 0) {
tdListAppend(&(*pEntry)->list, pKey);
......@@ -836,7 +836,7 @@ int32_t metaPutTbGroupToCache(void* pVnode, uint64_t suid, const void* pKey, int
// add to cache.
taosLRUCacheInsert(pCache, key, TAG_FILTER_RES_KEY_LEN, pPayload, payloadLen, freeTbGroupCachePayload, NULL,
                    TAOS_LRU_PRIORITY_LOW, NULL);
_end:
taosThreadMutexUnlock(pLock);
metaDebug("vgId:%d, suid:%" PRIu64 " tb group added into cache, total:%d, tables:%d", vgId, suid,
......@@ -852,7 +852,7 @@ int32_t metaTbGroupCacheClear(SMeta* pMeta, uint64_t suid) {
SHashObj* pEntryHashMap = pMeta->pCache->STbGroupResCache.pTableEntry;
uint64_t dummy[2] = {0};
  initCacheKey(p, pEntryHashMap, suid, (char*)&dummy[0], 16);
TdThreadMutex* pLock = &pMeta->pCache->STbGroupResCache.lock;
taosThreadMutexLock(pLock);
......@@ -877,8 +877,6 @@ int32_t metaTbGroupCacheClear(SMeta* pMeta, uint64_t suid) {
tdListEmpty(&(*pEntry)->list);
taosThreadMutexUnlock(pLock);
metaDebug("vgId:%d suid:%"PRId64" cached related tb group cleared", vgId, suid);
metaDebug("vgId:%d suid:%" PRId64 " cached related tb group cleared", vgId, suid);
return TSDB_CODE_SUCCESS;
}
......@@ -706,7 +706,6 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg
tqError("vgId:%d, build new consumer handle %s for consumer:0x%" PRIx64 ", but old consumerId:0x%" PRIx64,
req.vgId, req.subKey, req.newConsumerId, req.oldConsumerId);
}
if (req.newConsumerId == -1) {
tqError("vgId:%d, tq invalid re-balance request, new consumerId %" PRId64 "", req.vgId, req.newConsumerId);
goto end;
......
......@@ -405,7 +405,7 @@ int32_t tqMetaRestoreHandle(STQ* pTq) {
while (tdbTbcNext(pCur, &pKey, &kLen, &pVal, &vLen) == 0) {
STqHandle handle = {0};
code = restoreHandle(pTq, pVal, vLen, &handle);
    if (code < 0) {
tqDestroyTqHandle(&handle);
break;
}
......
......@@ -2846,18 +2846,18 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader) {
setBlockAllDumped(pDumpInfo, pBlock->maxKey.ts, pReader->order);
return code;
}
pBlockScanInfo = getTableBlockScanInfo(pReader->status.pTableMap, pBlockInfo->uid, pReader->idStr);
if (pBlockScanInfo == NULL) {
goto _end;
}
  TSDBKEY keyInBuf = getCurrentKeyInBuf(pBlockScanInfo, pReader);
// it is a clean block, load it directly
if (isCleanFileDataBlock(pReader, pBlockInfo, pBlock, pBlockScanInfo, keyInBuf, pLastBlockReader) &&
pBlock->nRow <= pReader->resBlockInfo.capacity) {
    if (asc || (!hasDataInLastBlock(pLastBlockReader) && (pBlock->maxKey.ts > keyInBuf.ts))) {
code = copyBlockDataToSDataBlock(pReader);
if (code) {
goto _end;
......
......@@ -107,6 +107,10 @@ static int32_t vnodePreProcessCreateTableMsg(SVnode *pVnode, SRpcMsg *pMsg) {
_exit:
tDecoderClear(&dc);
if (code) {
vError("vgId:%d, %s:%d failed to preprocess submit request since %s, msg type:%s", TD_VID(pVnode), __func__, lino,
tstrerror(code), TMSG_INFO(pMsg->msgType));
}
return code;
}
......@@ -272,11 +276,11 @@ static int32_t vnodePreProcessSubmitMsg(SVnode *pVnode, SRpcMsg *pMsg) {
tEndDecode(pCoder);
_exit:
tDecoderClear(pCoder);
if (code) {
vError("vgId:%d, failed to preprocess submit request since %s, msg type:%d", TD_VID(pVnode), tstrerror(code),
pMsg->msgType);
vError("vgId:%d, %s:%d failed to preprocess submit request since %s, msg type:%s", TD_VID(pVnode), __func__, lino,
tstrerror(code), TMSG_INFO(pMsg->msgType));
}
tDecoderClear(pCoder);
return code;
}
......@@ -367,8 +371,8 @@ int32_t vnodePreProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg) {
_exit:
if (code) {
vError("vgId:%d, failed to preprocess write request since %s, msg type:%d", TD_VID(pVnode), tstrerror(code),
pMsg->msgType);
vError("vgId:%d, failed to preprocess write request since %s, msg type:%s", TD_VID(pVnode), tstrerror(code),
TMSG_INFO(pMsg->msgType));
}
return code;
}
......
......@@ -170,6 +170,10 @@ SSortExecInfo tsortGetSortExecInfo(SSortHandle* pHandle);
*/
int32_t getProperSortPageSize(size_t rowSize, uint32_t numOfCols);
bool tsortIsClosed(SSortHandle* pHandle);
void tsortSetClosed(SSortHandle* pHandle);
#ifdef __cplusplus
}
#endif
......
......@@ -26,8 +26,8 @@
#include "executil.h"
#include "executorInt.h"
#include "querytask.h"
#include "tcompression.h"
#include "storageapi.h"
#include "tcompression.h"
typedef struct tagFilterAssist {
SHashObj* colHash;
......@@ -42,13 +42,13 @@ typedef enum {
} FilterCondType;
static FilterCondType checkTagCond(SNode* cond);
static FilterCondType checkTagCond(SNode* cond);
static int32_t optimizeTbnameInCond(void* metaHandle, int64_t suid, SArray* list, SNode* pTagCond, SStorageAPI* pAPI);
static int32_t optimizeTbnameInCondImpl(void* metaHandle, SArray* list, SNode* pTagCond, SStorageAPI* pStoreAPI);
static int32_t getTableList(void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, SNode* pTagIndexCond,
                            STableListInfo* pListInfo, uint8_t* digest, const char* idstr, SStorageAPI* pStorageAPI);
static SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList, void* pVnode,
                                               SStorageAPI* pStorageAPI);
static int64_t getLimit(const SNode* pLimit) { return NULL == pLimit ? -1 : ((SLimitNode*)pLimit)->limit; }
static int64_t getOffset(const SNode* pLimit) { return NULL == pLimit ? -1 : ((SLimitNode*)pLimit)->offset; }
......@@ -302,7 +302,7 @@ EDealRes doTranslateTagExpr(SNode** pNode, void* pContext) {
return DEAL_RES_CONTINUE;
}
int32_t isQualifiedTable(STableKeyInfo* info, SNode* pTagCond, void* metaHandle, bool* pQualified, SStorageAPI* pAPI) {
int32_t code = TSDB_CODE_SUCCESS;
SMetaReader mr = {0};
......@@ -495,7 +495,8 @@ int32_t getColInfoResultForGroupby(void* pVnode, SNodeList* group, STableListInf
genTbGroupDigest((SNode*)listNode, digest, &context);
nodesFree(listNode);
    pAPI->metaFn.metaGetCachedTbGroup(pVnode, pTableListInfo->idInfo.suid, context.digest, tListLen(context.digest),
                                      &tableList);
if (tableList) {
taosArrayDestroy(pTableListInfo->pTableList);
pTableListInfo->pTableList = tableList;
......@@ -632,7 +633,8 @@ int32_t getColInfoResultForGroupby(void* pVnode, SNodeList* group, STableListInf
if (tsTagFilterCache) {
tableList = taosArrayDup(pTableListInfo->pTableList, NULL);
    pAPI->metaFn.metaPutTbGroupToCache(pVnode, pTableListInfo->idInfo.suid, context.digest, tListLen(context.digest),
                                       tableList, taosArrayGetSize(tableList) * sizeof(STableKeyInfo));
}
// int64_t st2 = taosGetTimestampUs();
......@@ -776,7 +778,8 @@ static int32_t optimizeTbnameInCond(void* pVnode, int64_t suid, SArray* list, SN
}
// only return uids that are not contained in pExistedUidList
static int32_t optimizeTbnameInCondImpl(void* pVnode, SArray* pExistedUidList, SNode* pTagCond,
                                        SStorageAPI* pStoreAPI) {
if (nodeType(pTagCond) != QUERY_NODE_OPERATOR) {
return -1;
}
......@@ -839,8 +842,8 @@ static int32_t optimizeTbnameInCondImpl(void* pVnode, SArray* pExistedUidList, S
return -1;
}
static SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList, void* pVnode,
                                               SStorageAPI* pStorageAPI) {
SSDataBlock* pResBlock = createDataBlock();
if (pResBlock == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
......@@ -1080,8 +1083,8 @@ int32_t getTableList(void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, S
genTagFilterDigest(pTagCond, &context);
bool acquired = false;
    pStorageAPI->metaFn.getCachedTableList(pVnode, pScanNode->suid, context.digest, tListLen(context.digest),
                                           pUidList, &acquired);
if (acquired) {
digest[0] = 1;
memcpy(digest + 1, context.digest, tListLen(context.digest));
......@@ -1097,13 +1100,15 @@ int32_t getTableList(void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, S
if (pTagIndexCond) {
void* pIndex = pStorageAPI->metaFn.getInvertIndex(pVnode);
    SIndexMetaArg metaArg = {.metaEx = pVnode,
                             .idx = pStorageAPI->metaFn.storeGetIndexInfo(pVnode),
                             .ivtIdx = pIndex,
                             .suid = pScanNode->uid};
status = SFLT_NOT_INDEX;
code = doFilterTag(pTagIndexCond, &metaArg, pUidList, &status, &pStorageAPI->metaFilter);
if (code != 0 || status == SFLT_NOT_INDEX) { // temporarily disable it for performance sake
qWarn("failed to get tableIds from index, suid:%" PRIu64, pScanNode->uid);
qDebug("failed to get tableIds from index, suid:%" PRIu64, pScanNode->uid);
code = TSDB_CODE_SUCCESS;
} else {
qInfo("succ to get filter result, table num: %d", (int)taosArrayGetSize(pUidList));
......@@ -1128,7 +1133,7 @@ int32_t getTableList(void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, S
memcpy(pPayload + sizeof(int32_t), taosArrayGet(pUidList, 0), numOfTables * sizeof(uint64_t));
}
// metaUidFilterCachePut(metaHandle, pScanNode->suid, context.digest, tListLen(context.digest), pPayload, size, 1);
pStorageAPI->metaFn.putCachedTableList(pVnode, pScanNode->suid, context.digest, tListLen(context.digest), pPayload, size, 1);
digest[0] = 1;
memcpy(digest + 1, context.digest, tListLen(context.digest));
}
......@@ -1152,15 +1157,17 @@ _end:
return code;
}
int32_t qGetTableList(int64_t suid, void* pVnode, void* node, SArray** tableList, void* pTaskInfo) {
  SSubplan* pSubplan = (SSubplan*)node;
SScanPhysiNode pNode = {0};
pNode.suid = suid;
pNode.uid = suid;
pNode.tableType = TSDB_SUPER_TABLE;
STableListInfo* pTableListInfo = tableListCreate();
  uint8_t digest[17] = {0};
  int     code =
      getTableList(pVnode, &pNode, pSubplan ? pSubplan->pTagCond : NULL, pSubplan ? pSubplan->pTagIndexCond : NULL,
                   pTableListInfo, digest, "qGetTableList", &((SExecTaskInfo*)pTaskInfo)->storageAPI);
*tableList = pTableListInfo->pTableList;
pTableListInfo->pTableList = NULL;
tableListDestroy(pTableListInfo);
......@@ -1181,7 +1188,7 @@ size_t getTableTagsBufLen(const SNodeList* pGroups) {
}
int32_t getGroupIdFromTagsVal(void* pVnode, uint64_t uid, SNodeList* pGroupNode, char* keyBuf, uint64_t* pGroupId,
                              SStorageAPI* pAPI) {
SMetaReader mr = {0};
pAPI->metaReaderFn.initReader(&mr, pVnode, 0, &pAPI->metaFn);
......@@ -1560,7 +1567,8 @@ static int32_t setSelectValueColumnInfo(SqlFunctionCtx* pCtx, int32_t numOfOutpu
return TSDB_CODE_SUCCESS;
}
SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput, int32_t** rowEntryInfoOffset,
                                     SFunctionStateStore* pStore) {
SqlFunctionCtx* pFuncCtx = (SqlFunctionCtx*)taosMemoryCalloc(numOfOutput, sizeof(SqlFunctionCtx));
if (pFuncCtx == NULL) {
return NULL;
......@@ -1849,7 +1857,7 @@ void getNextTimeWindow(const SInterval* pInterval, STimeWindow* tw, int32_t orde
}
struct tm tm;
  time_t t = (time_t)key;
taosLocalTime(&t, &tm, NULL);
int mon = (int)(tm.tm_year * 12 + tm.tm_mon + duration * factor);
......@@ -2079,8 +2087,8 @@ static int32_t sortTableGroup(STableListInfo* pTableListInfo) {
return TSDB_CODE_SUCCESS;
}
int32_t buildGroupIdMapForAllTables(STableListInfo* pTableListInfo, SReadHandle* pHandle, SScanPhysiNode* pScanNode,
                                    SNodeList* group, bool groupSort, uint8_t* digest, SStorageAPI* pAPI) {
int32_t code = TSDB_CODE_SUCCESS;
bool groupByTbname = groupbyTbname(group);
......@@ -2132,7 +2140,8 @@ int32_t createScanTableListInfo(SScanPhysiNode* pScanNode, SNodeList* pGroupTags
}
uint8_t digest[17] = {0};
  int32_t code = getTableList(pHandle->vnode, pScanNode, pTagCond, pTagIndexCond, pTableListInfo, digest, idStr,
                              &pTaskInfo->storageAPI);
if (code != TSDB_CODE_SUCCESS) {
qError("failed to getTableList, code: %s", tstrerror(code));
return code;
......@@ -2150,7 +2159,8 @@ int32_t createScanTableListInfo(SScanPhysiNode* pScanNode, SNodeList* pGroupTags
return TSDB_CODE_SUCCESS;
}
  code = buildGroupIdMapForAllTables(pTableListInfo, pHandle, pScanNode, pGroupTags, groupSort, digest,
                                     &pTaskInfo->storageAPI);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
......
......@@ -185,6 +185,8 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) {
// the scan order may be different from the output result order for agg interval operator.
if (pDownstream->operatorType == QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL) {
order = ((SIntervalAggOperatorInfo*) pDownstream->info)->resultTsOrder;
} else {
order = pInfo->pFillInfo->order;
}
#endif
......
......@@ -55,7 +55,6 @@ typedef struct STableMergeScanSortSourceParam {
int32_t readerIdx;
uint64_t uid;
SSDataBlock* inputBlock;
STsdbReader* dataReader;
} STableMergeScanSortSourceParam;
......@@ -466,7 +465,12 @@ static STableCachedVal* createTableCacheVal(const SMetaReader* pMetaReader) {
}
// const void *key, size_t keyLen, void *value, void *ud
static void freeCachedMetaItem(const void* key, size_t keyLen, void* value, void* ud) {
(void)key;
(void)keyLen;
(void)ud;
freeTableCachedVal(value);
}
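Throughout this change the LRU deleter callback gains a trailing user-data pointer, and every taosLRUCacheInsert call passes one extra argument to match. A hedged sketch of the adjusted callback shape, inferred from the call sites in this diff (names hypothetical):

```c
#include <stddef.h>
#include <stdlib.h>

// Deleter now also receives the user-data pointer supplied at insert time.
typedef void (*lru_deleter_fn)(const void* key, size_t keyLen, void* value, void* ud);

static void dropValue(const void* key, size_t keyLen, void* value, void* ud) {
  (void)key;
  (void)keyLen;
  (void)ud;     // unused here; real deleters may carry per-cache context
  free(value);  // the cache owns the value until eviction
}
```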
static void doSetNullValue(SSDataBlock* pBlock, const SExprInfo* pExpr, int32_t numOfExpr) {
for (int32_t j = 0; j < numOfExpr; ++j) {
......@@ -501,7 +505,7 @@ int32_t addTagPseudoColumnData(SReadHandle* pHandle, const SExprInfo* pExpr, int
// 1. check if it is existed in meta cache
if (pCache == NULL) {
    pHandle->api.metaReaderFn.initReader(&mr, pHandle->vnode, META_READER_NOLOCK, &pHandle->api.metaFn);
code = pHandle->api.metaReaderFn.getEntryGetUidCache(&mr, pBlock->info.id.uid);
if (code != TSDB_CODE_SUCCESS) {
// when encounter the TSDB_CODE_PAR_TABLE_NOT_EXIST error, we proceed.
......@@ -554,7 +558,7 @@ int32_t addTagPseudoColumnData(SReadHandle* pHandle, const SExprInfo* pExpr, int
freeReader = true;
int32_t ret = taosLRUCacheInsert(pCache->pTableMetaEntryCache, &pBlock->info.id.uid, sizeof(uint64_t), pVal,
                                     sizeof(STableCachedVal), freeCachedMetaItem, NULL, TAOS_LRU_PRIORITY_LOW, NULL);
if (ret != TAOS_LRU_STATUS_OK) {
qError("failed to put meta into lru cache, code:%d, %s", ret, idStr);
freeTableCachedVal(pVal);
......@@ -2654,8 +2658,7 @@ static SSDataBlock* getTableDataBlockImpl(void* param) {
int64_t st = taosGetTimestampUs();
void* p = tableListGetInfo(pInfo->base.pTableListInfo, readIdx + pInfo->tableStartIndex);
SReadHandle* pHandle = &pInfo->base.readHandle;
  if (NULL == source->dataReader) {
code = pAPI->tsdReader.tsdReaderOpen(pHandle->vnode, pQueryCond, p, 1, pBlock, (void**)&source->dataReader, GET_TASKID(pTaskInfo), false, NULL);
if (code != 0) {
T_LONG_JMP(pTaskInfo->env, code);
......@@ -2719,19 +2722,15 @@ static SSDataBlock* getTableDataBlockImpl(void* param) {
pInfo->base.readRecorder.elapsedTime += (taosGetTimestampUs() - st) / 1000.0;
qTrace("tsdb/read-table-data: %p, close reader", reader);
pInfo->base.dataReader = NULL;
return pBlock;
}
pAPI->tsdReader.tsdReaderClose(source->dataReader);
source->dataReader = NULL;
pInfo->base.dataReader = NULL;
blockDataDestroy(source->inputBlock);
source->inputBlock = NULL;
return NULL;
}
......@@ -2787,7 +2786,19 @@ int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) {
// todo the total available buffer should be determined by total capacity of buffer of this task.
// the additional one is reserved for merge result
  // pInfo->sortBufSize = pInfo->bufPageSize * (tableEndIdx - tableStartIdx + 1 + 1);
int32_t kWay = (TSDB_MAX_BYTES_PER_ROW * 2) / (pInfo->pResBlock->info.rowSize);
if (kWay >= 128) {
kWay = 128;
} else if (kWay <= 2) {
kWay = 2;
} else {
int i = 2;
while (i * 2 <= kWay) i = i * 2;
kWay = i;
}
pInfo->sortBufSize = pInfo->bufPageSize * (kWay + 1);
int32_t numOfBufPage = pInfo->sortBufSize / pInfo->bufPageSize;
pInfo->pSortHandle = tsortCreateSortHandle(pInfo->pSortInfo, SORT_MULTISOURCE_MERGE, pInfo->bufPageSize, numOfBufPage,
pInfo->pSortInputBlock, pTaskInfo->id.str);
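The replacement above no longer sizes the sort buffer by table count; it derives a merge fan-out from how many rows of this block shape fit in twice the maximum row size, then rounds down to a power of two and clamps to [2, 128]. The same clamping, extracted as a standalone helper for clarity (illustrative only):

```c
#include <stdint.h>

// Round the merge fan-out down to a power of two within [2, 128].
static int32_t clampFanOut(int32_t maxBytesPerRow, int32_t rowSize) {
  int32_t kWay = (maxBytesPerRow * 2) / rowSize;
  if (kWay >= 128) return 128;
  if (kWay <= 2) return 2;
  int32_t i = 2;
  while (i * 2 <= kWay) i *= 2;  // largest power of two <= kWay
  return i;
}
```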
......@@ -2802,9 +2813,7 @@ int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) {
STableMergeScanSortSourceParam param = {0};
param.readerIdx = i;
param.pOperator = pOperator;
param.inputBlock = createOneDataBlock(pInfo->pResBlock, false);
blockDataEnsureCapacity(param.inputBlock, pOperator->resultInfo.capacity);
taosArrayPush(pInfo->sortSourceParams, &param);
......@@ -2887,6 +2896,11 @@ SSDataBlock* getSortedTableMergeScanBlockData(SSortHandle* pHandle, SSDataBlock*
}
}
if (tsortIsClosed(pHandle)) {
terrno = TSDB_CODE_TSC_QUERY_CANCELLED;
T_LONG_JMP(pOperator->pTaskInfo->env, terrno);
}
bool limitReached = applyLimitOffset(&pInfo->limitInfo, pResBlock, pTaskInfo);
qDebug("%s get sorted row block, rows:%" PRId64 ", limit:%" PRId64, GET_TASKID(pTaskInfo), pResBlock->info.rows,
pInfo->limitInfo.numOfOutputRows);
......
......@@ -233,6 +233,11 @@ SSDataBlock* doSort(SOperatorInfo* pOperator) {
// multi-group case not handle here
SSDataBlock* pBlock = NULL;
while (1) {
if (tsortIsClosed(pInfo->pSortHandle)) {
terrno = TSDB_CODE_TSC_QUERY_CANCELLED;
T_LONG_JMP(pOperator->pTaskInfo->env, terrno);
}
pBlock = getSortedBlockData(pInfo->pSortHandle, pInfo->binfo.pRes, pOperator->resultInfo.capacity,
pInfo->matchInfo.pList, pInfo);
if (pBlock == NULL) {
......@@ -445,6 +450,11 @@ SSDataBlock* doGroupSort(SOperatorInfo* pOperator) {
SSDataBlock* pBlock = NULL;
while (pInfo->pCurrSortHandle != NULL) {
if (tsortIsClosed(pInfo->pCurrSortHandle)) {
terrno = TSDB_CODE_TSC_QUERY_CANCELLED;
T_LONG_JMP(pOperator->pTaskInfo->env, terrno);
}
// beginSortGroup would fetch all child blocks of pInfo->currGroupId;
ASSERT(pInfo->childOpStatus != CHILD_OP_SAME_GROUP);
pBlock = getGroupSortedBlockData(pInfo->pCurrSortHandle, pInfo->binfo.pRes, pOperator->resultInfo.capacity,
......
......@@ -1601,6 +1601,8 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) {
SSysTableScanInfo* pInfo = pOperator->info;
char dbName[TSDB_DB_NAME_LEN] = {0};
blockDataCleanup(pInfo->pRes);
const char* name = tNameGetTableName(&pInfo->name);
if (pInfo->showRewrite) {
getDBNameFromCondition(pInfo->pCondition, dbName);
......
......@@ -46,6 +46,7 @@ struct SSortHandle {
SMsortComparParam cmpParam;
int32_t numOfCompletedSources;
bool opened;
int8_t closed;
const char* idStr;
bool inMemSort;
bool needAdjust;
......@@ -152,7 +153,7 @@ void tsortDestroySortHandle(SSortHandle* pSortHandle) {
tsortClose(pSortHandle);
if (pSortHandle->pMergeTree != NULL) {
    tMergeTreeDestroy(&pSortHandle->pMergeTree);
}
destroyDiskbasedBuf(pSortHandle->pBuf);
......@@ -581,6 +582,11 @@ static int32_t doInternalMergeSort(SSortHandle* pHandle) {
SArray* pPageIdList = taosArrayInit(4, sizeof(int32_t));
while (1) {
if (tsortIsClosed(pHandle)) {
code = terrno = TSDB_CODE_TSC_QUERY_CANCELLED;
return code;
}
SSDataBlock* pDataBlock = getSortedBlockDataInner(pHandle, &pHandle->cmpParam, numOfRows);
if (pDataBlock == NULL) {
break;
......@@ -609,7 +615,7 @@ static int32_t doInternalMergeSort(SSortHandle* pHandle) {
}
sortComparCleanup(&pHandle->cmpParam);
  tMergeTreeDestroy(&pHandle->pMergeTree);
pHandle->numOfCompletedSources = 0;
SSDataBlock* pBlock = createOneDataBlock(pHandle->pDataBlock, false);
......@@ -803,10 +809,19 @@ int32_t tsortOpen(SSortHandle* pHandle) {
}
int32_t tsortClose(SSortHandle* pHandle) {
  atomic_val_compare_exchange_8(&pHandle->closed, 0, 1);
taosMsleep(10);
return TSDB_CODE_SUCCESS;
}
bool tsortIsClosed(SSortHandle* pHandle) {
return atomic_val_compare_exchange_8(&pHandle->closed, 1, 2);
}
void tsortSetClosed(SSortHandle* pHandle) {
atomic_store_8(&pHandle->closed, 2);
}
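The closed flag is a small state machine: 0 (open), 1 (close requested), 2 (cancellation observed). tsortClose latches 0 to 1, while tsortIsClosed latches 1 to 2 and reports the prior value, so pollers start seeing the cancellation once it is requested and keep seeing it afterwards. A minimal sketch of the same protocol in C11 atomics (assuming the taos CAS helpers return the old value, as a standard CAS does):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static _Atomic int8_t closed;  // 0 = open, 1 = close requested, 2 = observed

static void requestClose(void) {
  int8_t expect = 0;
  atomic_compare_exchange_strong(&closed, &expect, 1);  // 0 -> 1 only
}

static bool isClosed(void) {
  int8_t expect = 1;
  atomic_compare_exchange_strong(&closed, &expect, 2);  // latch 1 -> 2
  return expect != 0;  // on failure, CAS wrote the old value into 'expect'
}
```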
int32_t tsortSetFetchRawDataFp(SSortHandle* pHandle, _sort_fetch_block_fn_t fetchFp, void (*fp)(SSDataBlock*, void*),
void* param) {
pHandle->fetchfp = fetchFp;
......@@ -826,6 +841,9 @@ int32_t tsortSetCompareGroupId(SSortHandle* pHandle, bool compareGroupId) {
}
STupleHandle* tsortNextTuple(SSortHandle* pHandle) {
if (tsortIsClosed(pHandle)) {
return NULL;
}
if (pHandle->cmpParam.numOfSources == pHandle->numOfCompletedSources) {
return NULL;
}
......
......@@ -51,6 +51,7 @@ extern "C" {
#define FUNC_MGT_CUMULATIVE_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(22)
#define FUNC_MGT_INTERP_PC_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(23)
#define FUNC_MGT_GEOMETRY_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(24)
#define FUNC_MGT_FORBID_SYSTABLE_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(25)
#define FUNC_MGT_TEST_MASK(val, mask) (((val) & (mask)) != 0)
......
(This file's diff is collapsed.)
......@@ -219,6 +219,8 @@ bool fmIsKeepOrderFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, F
bool fmIsCumulativeFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC_MGT_CUMULATIVE_FUNC); }
bool fmIsForbidSysTableFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC_MGT_FORBID_SYSTABLE_FUNC); }
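These fmIs* predicates are single-bit tests: each FUNC_MGT_* class owns one bit of the function's classification word, so adding FUNC_MGT_FORBID_SYSTABLE_FUNC at bit 25 automatically yields a matching predicate. A minimal sketch of the mask scheme (the shift-based mask definition is an assumption consistent with the macros above):

```c
#include <stdbool.h>
#include <stdint.h>

#define FUNC_MGT_FUNC_CLASSIFICATION_MASK(n) (1ULL << (n))  // assumed form
#define FUNC_MGT_FORBID_SYSTABLE_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(25)
#define FUNC_MGT_TEST_MASK(val, mask) (((val) & (mask)) != 0)

static bool isForbidSysTable(uint64_t classification) {
  return FUNC_MGT_TEST_MASK(classification, FUNC_MGT_FORBID_SYSTABLE_FUNC);
}
```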
bool fmIsInterpFunc(int32_t funcId) {
if (funcId < 0 || funcId >= funcMgtBuiltinsNum) {
return false;
......
......@@ -170,6 +170,8 @@ static char* getSyntaxErrFormat(int32_t errCode) {
return "%s function is not supported in stream query";
case TSDB_CODE_PAR_GROUP_BY_NOT_ALLOWED_FUNC:
return "%s function is not supported in group query";
case TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED_FUNC:
return "%s function is not supported in system table query";
case TSDB_CODE_PAR_INVALID_INTERP_CLAUSE:
return "Invalid usage of RANGE clause, EVERY clause or FILL clause";
case TSDB_CODE_PAR_NO_VALID_FUNC_IN_WIN:
......
(The remaining file diffs are collapsed.)