diff --git a/CMakeLists.txt b/CMakeLists.txt index fb2b306f65467361dd5612f5bf1e6629a7a0c306..5048287cf6967a7e609fb559bfd6fdf352482ec4 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -15,11 +15,15 @@ SET(TD_COMMUNITY_DIR ${PROJECT_SOURCE_DIR}) set(TD_SUPPORT_DIR "${TD_SOURCE_DIR}/cmake") set(TD_CONTRIB_DIR "${TD_SOURCE_DIR}/contrib") + + + include(${TD_SUPPORT_DIR}/cmake.platform) include(${TD_SUPPORT_DIR}/cmake.define) include(${TD_SUPPORT_DIR}/cmake.options) include(${TD_SUPPORT_DIR}/cmake.version) + # contrib add_subdirectory(contrib) diff --git a/README-CN.md b/README-CN.md index f830404af33a52bd7c10ef9b455b08db51a5442a..1d96a427092bd6f96ccff8d6666379c011ca1827 100644 --- a/README-CN.md +++ b/README-CN.md @@ -68,14 +68,14 @@ sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-d ```bash sudo yum install epel-release sudo yum update -sudo yum install -y gcc gcc-c++ make cmake3 git openssl-devel +sudo yum install -y gcc gcc-c++ make cmake3 gflags git openssl-devel sudo ln -sf /usr/bin/cmake3 /usr/bin/cmake ``` ### CentOS 8/Fedora/Rocky Linux ```bash -sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel +sudo dnf install -y gcc gcc-c++ gflags make cmake epel-release git openssl-devel ``` #### 在 CentOS 上构建 taosTools 安装依赖软件 @@ -117,7 +117,7 @@ scl enable devtoolset-9 -- bash ### macOS ``` -brew install argp-standalone pkgconfig +brew install argp-standalone gflags pkgconfig ``` ### 设置 golang 开发环境 diff --git a/README.md b/README.md index f477a51a1ff8d1cf45acfbc41cbf2aec1b86ce1a..f065eb26853fbaae04e142f286bda76bd1b244f6 100644 --- a/README.md +++ b/README.md @@ -76,14 +76,14 @@ sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-d ```bash sudo yum install epel-release sudo yum update -sudo yum install -y gcc gcc-c++ make cmake3 git openssl-devel +sudo yum install -y gcc gcc-c++ make cmake3 gflags git openssl-devel sudo ln -sf /usr/bin/cmake3 /usr/bin/cmake ``` ### CentOS 8/Fedora/Rocky Linux ```bash -sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel +sudo dnf install -y gcc gcc-c++ make cmake epel-release gflags git openssl-devel ``` #### Install build dependencies for taosTools on CentOS @@ -124,7 +124,7 @@ scl enable devtoolset-9 -- bash ### macOS ``` -brew install argp-standalone pkgconfig +brew install argp-standalone gflags pkgconfig ``` ### Setup golang environment diff --git a/cmake/cmake.options b/cmake/cmake.options index 555b72cbdf4955ec5945103716125b67c2ea6ed3..fa0b88841519305ff30b758356e66cf041d8d900 100644 --- a/cmake/cmake.options +++ b/cmake/cmake.options @@ -189,3 +189,9 @@ option( "If build release version" OFF ) + +option( + BUILD_CONTRIB + "If build thirdpart from source" + OFF +) diff --git a/cmake/cmake.platform b/cmake/cmake.platform index 25b442ab724c7a38eea076d8253de743a4f93bec..76ac6ba004674386672d45302649b134a61d4cfd 100644 --- a/cmake/cmake.platform +++ b/cmake/cmake.platform @@ -121,6 +121,12 @@ IF ("${CPUTYPE}" STREQUAL "") SET(TD_LOONGARCH_64 TRUE) ADD_DEFINITIONS("-D_TD_LOONGARCH_") ADD_DEFINITIONS("-D_TD_LOONGARCH_64") + ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "mips64") + SET(PLATFORM_ARCH_STR "mips") + MESSAGE(STATUS "input cpuType: mips64") + SET(TD_MIPS_64 TRUE) + ADD_DEFINITIONS("-D_TD_MIPS_") + ADD_DEFINITIONS("-D_TD_MIPS_64") ENDIF () ELSE () # if generate ARM version: @@ -176,6 +182,8 @@ set(TD_DEPS_DIR "x86") if (TD_LINUX) IF (TD_ARM_64 OR TD_ARM_32) set(TD_DEPS_DIR "arm") + ELSEIF (TD_MIPS_64) + set(TD_DEPS_DIR "mips") ELSE() set(TD_DEPS_DIR "x86") 
ENDIF() diff --git a/cmake/rocksdb_CMakeLists.txt.in b/cmake/rocksdb_CMakeLists.txt.in index 4f75b27f490b3b81fe9f6687f34a1d8297018dd6..f238ed20af79ba74d07927eb66b35456a00b279e 100644 --- a/cmake/rocksdb_CMakeLists.txt.in +++ b/cmake/rocksdb_CMakeLists.txt.in @@ -1,14 +1,29 @@ # rocksdb -IF (NOT ${TD_LINUX}) -ExternalProject_Add(rocksdb - GIT_REPOSITORY https://github.com/facebook/rocksdb.git - GIT_TAG v8.1.1 - SOURCE_DIR "${TD_CONTRIB_DIR}/rocksdb" - CONFIGURE_COMMAND "" - BUILD_COMMAND "" - INSTALL_COMMAND "" - TEST_COMMAND "" +if (${BUILD_CONTRIB}) + ExternalProject_Add(rocksdb + URL https://github.com/facebook/rocksdb/archive/refs/tags/v8.1.1.tar.gz + URL_HASH MD5=3b4c97ee45df9c8a5517308d31ab008b + DOWNLOAD_NO_PROGRESS 1 + DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download" + SOURCE_DIR "${TD_CONTRIB_DIR}/rocksdb" + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + TEST_COMMAND "" ) -ENDIF(NOT ${TD_LINUX}) - +else() + if (NOT ${TD_LINUX}) + ExternalProject_Add(rocksdb + URL https://github.com/facebook/rocksdb/archive/refs/tags/v8.1.1.tar.gz + URL_HASH MD5=3b4c97ee45df9c8a5517308d31ab008b + DOWNLOAD_NO_PROGRESS 1 + DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download" + SOURCE_DIR "${TD_CONTRIB_DIR}/rocksdb" + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + TEST_COMMAND "" + ) + endif() +endif() diff --git a/cmake/taosadapter_CMakeLists.txt.in b/cmake/taosadapter_CMakeLists.txt.in index ef6ed4af1d1171bc91867d0aa9cd8dc184852ced..13826a1a7487c0c16f3f80c934d2d406a2ed4c7f 100644 --- a/cmake/taosadapter_CMakeLists.txt.in +++ b/cmake/taosadapter_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taosadapter ExternalProject_Add(taosadapter GIT_REPOSITORY https://github.com/taosdata/taosadapter.git - GIT_TAG 3.0 + GIT_TAG main SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter" BINARY_DIR "" #BUILD_IN_SOURCE TRUE diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 1928132210fe8eb8e0168030c117632e7a6ee0c7..c60fd33b16187840d2cc27feb8321e14b801ed5d 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -77,19 +77,23 @@ if(${BUILD_WITH_LEVELDB}) cat("${TD_SUPPORT_DIR}/leveldb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) endif(${BUILD_WITH_LEVELDB}) -# rocksdb -IF (NOT ${TD_LINUX}) -if(${BUILD_WITH_ROCKSDB}) - cat("${TD_SUPPORT_DIR}/rocksdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) - add_definitions(-DUSE_ROCKSDB) -endif(${BUILD_WITH_ROCKSDB}) -ELSE() -if(${BUILD_WITH_ROCKSDB}) - #cat("${TD_SUPPORT_DIR}/rocksdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) - add_definitions(-DUSE_ROCKSDB) -endif(${BUILD_WITH_ROCKSDB}) - -ENDIF(NOT ${TD_LINUX}) +if (${BUILD_CONTRIB}) + if(${BUILD_WITH_ROCKSDB}) + cat("${TD_SUPPORT_DIR}/rocksdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + add_definitions(-DUSE_ROCKSDB) + endif() +else() + if (NOT ${TD_LINUX}) + if(${BUILD_WITH_ROCKSDB}) + cat("${TD_SUPPORT_DIR}/rocksdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + add_definitions(-DUSE_ROCKSDB) + endif(${BUILD_WITH_ROCKSDB}) + else() + if(${BUILD_WITH_ROCKSDB}) + add_definitions(-DUSE_ROCKSDB) + endif(${BUILD_WITH_ROCKSDB}) + endif() +endif() # canonical-raft if(${BUILD_WITH_CRAFT}) @@ -235,70 +239,114 @@ endif(${BUILD_WITH_LEVELDB}) # rocksdb # To support rocksdb build on ubuntu: sudo apt-get install libgflags-dev -IF (NOT ${TD_LINUX}) - -if(${BUILD_WITH_ROCKSDB}) +if (${BUILD_WITH_UV}) if(${TD_LINUX}) - SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL} -Wno-error=maybe-uninitialized -Wno-error=unused-but-set-variable -Wno-error=unused-variable -Wno-error=unused-function -Wno-errno=unused-private-field 
-Wno-error=unused-result") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS_REL}") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL}") IF ("${CMAKE_BUILD_TYPE}" STREQUAL "") SET(CMAKE_BUILD_TYPE Release) endif() endif(${TD_LINUX}) - MESSAGE(STATUS "CXXXX STATUS CONFIG: " ${CMAKE_CXX_FLAGS}) - - if(${TD_DARWIN}) - SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized") - endif(${TD_DARWIN}) - - if (${TD_DARWIN_ARM64}) - set(HAS_ARMV8_CRC true) - endif(${TD_DARWIN_ARM64}) - - if (${TD_WINDOWS}) - SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4244 /wd4819") - endif(${TD_WINDOWS}) - - - if(${TD_DARWIN}) - option(HAVE_THREAD_LOCAL "" OFF) - option(WITH_IOSTATS_CONTEXT "" OFF) - option(WITH_PERF_CONTEXT "" OFF) - endif(${TD_DARWIN}) - - if(${TD_WINDOWS}) - option(WITH_JNI "" OFF) - endif(${TD_WINDOWS}) - - if(${TD_WINDOWS}) - option(WITH_MD_LIBRARY "build with MD" OFF) - set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib) - endif(${TD_WINDOWS}) - - - option(WITH_FALLOCATE "" OFF) - option(WITH_JEMALLOC "" OFF) - option(WITH_GFLAGS "" OFF) - option(PORTABLE "" ON) - option(WITH_LIBURING "" OFF) - option(FAIL_ON_WARNINGS OFF) - - option(WITH_TESTS "" OFF) - option(WITH_BENCHMARK_TOOLS "" OFF) - option(WITH_TOOLS "" OFF) - option(WITH_LIBURING "" OFF) - IF (TD_LINUX) - option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF) - ELSE() - option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF) - ENDIF() - add_subdirectory(rocksdb EXCLUDE_FROM_ALL) - target_include_directories( - rocksdb - PUBLIC $ - ) -endif(${BUILD_WITH_ROCKSDB}) +endif (${BUILD_WITH_UV}) + +if (${BUILD_WITH_ROCKSDB}) + if (${BUILD_CONTRIB}) + if(${TD_LINUX}) + SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL} -Wno-error=maybe-uninitialized -Wno-error=unused-but-set-variable -Wno-error=unused-variable -Wno-error=unused-function -Wno-errno=unused-private-field -Wno-error=unused-result") + if ("${CMAKE_BUILD_TYPE}" STREQUAL "") + SET(CMAKE_BUILD_TYPE Release) + endif() + endif(${TD_LINUX}) + MESSAGE(STATUS "CXXXX STATUS CONFIG: " ${CMAKE_CXX_FLAGS}) + + if(${TD_DARWIN}) + SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized") + endif(${TD_DARWIN}) + + if (${TD_DARWIN_ARM64}) + set(HAS_ARMV8_CRC true) + endif(${TD_DARWIN_ARM64}) + + if (${TD_WINDOWS}) + SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4244 /wd4819") + option(WITH_JNI "" OFF) + option(WITH_MD_LIBRARY "build with MD" OFF) + set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib) + endif(${TD_WINDOWS}) + + + if(${TD_DARWIN}) + option(HAVE_THREAD_LOCAL "" OFF) + option(WITH_IOSTATS_CONTEXT "" OFF) + option(WITH_PERF_CONTEXT "" OFF) + endif(${TD_DARWIN}) + + option(WITH_FALLOCATE "" OFF) + option(WITH_JEMALLOC "" OFF) + option(WITH_GFLAGS "" OFF) + option(PORTABLE "" ON) + option(WITH_LIBURING "" OFF) + option(FAIL_ON_WARNINGS OFF) + + option(WITH_TESTS "" OFF) + option(WITH_BENCHMARK_TOOLS "" OFF) + option(WITH_TOOLS "" OFF) + option(WITH_LIBURING "" OFF) + + option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF) + add_subdirectory(rocksdb EXCLUDE_FROM_ALL) + target_include_directories( + rocksdb + PUBLIC $ + ) + else() + if (NOT ${TD_LINUX}) + MESSAGE(STATUS "CXXXX STATUS CONFIG: " ${CMAKE_CXX_FLAGS}) + if(${TD_DARWIN}) + SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized") + endif(${TD_DARWIN}) + + if (${TD_DARWIN_ARM64}) + set(HAS_ARMV8_CRC true) + endif(${TD_DARWIN_ARM64}) + + if (${TD_WINDOWS}) + SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4244 /wd4819") + 
option(WITH_JNI "" OFF) + option(WITH_MD_LIBRARY "build with MD" OFF) + set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib) + endif(${TD_WINDOWS}) + + + if(${TD_DARWIN}) + option(HAVE_THREAD_LOCAL "" OFF) + option(WITH_IOSTATS_CONTEXT "" OFF) + option(WITH_PERF_CONTEXT "" OFF) + endif(${TD_DARWIN}) + + option(WITH_FALLOCATE "" OFF) + option(WITH_JEMALLOC "" OFF) + option(WITH_GFLAGS "" OFF) + option(PORTABLE "" ON) + option(WITH_LIBURING "" OFF) + option(FAIL_ON_WARNINGS OFF) + + option(WITH_TESTS "" OFF) + option(WITH_BENCHMARK_TOOLS "" OFF) + option(WITH_TOOLS "" OFF) + option(WITH_LIBURING "" OFF) + + option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF) + add_subdirectory(rocksdb EXCLUDE_FROM_ALL) + target_include_directories( + rocksdb + PUBLIC $ + ) + endif() + + endif() +endif() -ENDIF(NOT ${TD_LINUX}) # lucene # To support build on ubuntu: sudo apt-get install libboost-all-dev if(${BUILD_WITH_LUCENE}) diff --git a/deps/mips/rocksdb_static/librocksdb.a b/deps/mips/rocksdb_static/librocksdb.a new file mode 100644 index 0000000000000000000000000000000000000000..6df885a0685006baaede75c70b7be0c410115439 Binary files /dev/null and b/deps/mips/rocksdb_static/librocksdb.a differ diff --git a/deps/mips/rocksdb_static/rocksdb/c.h b/deps/mips/rocksdb_static/rocksdb/c.h new file mode 100644 index 0000000000000000000000000000000000000000..1ba7fabefe92f674ecbd27a91bfbb57b0ed17d23 --- /dev/null +++ b/deps/mips/rocksdb_static/rocksdb/c.h @@ -0,0 +1,2844 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +/* Copyright (c) 2011 The LevelDB Authors. All rights reserved. + Use of this source code is governed by a BSD-style license that can be + found in the LICENSE file. See the AUTHORS file for names of contributors. + + C bindings for rocksdb. May be useful as a stable ABI that can be + used by programs that keep rocksdb in a shared library, or for + a JNI api. + + Does not support: + . getters for the option types + . custom comparators that implement key shortening + . capturing post-write-snapshot + . custom iter, db, env, cache implementations using just the C bindings + + Some conventions: + + (1) We expose just opaque struct pointers and functions to clients. + This allows us to change internal representations without having to + recompile clients. + + (2) For simplicity, there is no equivalent to the Slice type. Instead, + the caller has to pass the pointer and length as separate + arguments. + + (3) Errors are represented by a null-terminated c string. NULL + means no error. All operations that can raise an error are passed + a "char** errptr" as the last argument. One of the following must + be true on entry: + *errptr == NULL + *errptr points to a malloc()ed null-terminated error message + On success, a leveldb routine leaves *errptr unchanged. + On failure, leveldb frees the old value of *errptr and + set *errptr to a malloc()ed error message. + + (4) Bools have the type unsigned char (0 == false; rest == true) + + (5) All of the pointer arguments must be non-NULL. 
+*/ + +#pragma once + +#ifdef _WIN32 +#ifdef ROCKSDB_DLL +#ifdef ROCKSDB_LIBRARY_EXPORTS +#define ROCKSDB_LIBRARY_API __declspec(dllexport) +#else +#define ROCKSDB_LIBRARY_API __declspec(dllimport) +#endif +#else +#define ROCKSDB_LIBRARY_API +#endif +#else +#define ROCKSDB_LIBRARY_API +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include + +/* Exported types */ + +typedef struct rocksdb_t rocksdb_t; +typedef struct rocksdb_backup_engine_t rocksdb_backup_engine_t; +typedef struct rocksdb_backup_engine_info_t rocksdb_backup_engine_info_t; +typedef struct rocksdb_backup_engine_options_t rocksdb_backup_engine_options_t; +typedef struct rocksdb_restore_options_t rocksdb_restore_options_t; +typedef struct rocksdb_memory_allocator_t rocksdb_memory_allocator_t; +typedef struct rocksdb_lru_cache_options_t rocksdb_lru_cache_options_t; +typedef struct rocksdb_hyper_clock_cache_options_t + rocksdb_hyper_clock_cache_options_t; +typedef struct rocksdb_cache_t rocksdb_cache_t; +typedef struct rocksdb_compactionfilter_t rocksdb_compactionfilter_t; +typedef struct rocksdb_compactionfiltercontext_t + rocksdb_compactionfiltercontext_t; +typedef struct rocksdb_compactionfilterfactory_t + rocksdb_compactionfilterfactory_t; +typedef struct rocksdb_comparator_t rocksdb_comparator_t; +typedef struct rocksdb_dbpath_t rocksdb_dbpath_t; +typedef struct rocksdb_env_t rocksdb_env_t; +typedef struct rocksdb_fifo_compaction_options_t + rocksdb_fifo_compaction_options_t; +typedef struct rocksdb_filelock_t rocksdb_filelock_t; +typedef struct rocksdb_filterpolicy_t rocksdb_filterpolicy_t; +typedef struct rocksdb_flushoptions_t rocksdb_flushoptions_t; +typedef struct rocksdb_iterator_t rocksdb_iterator_t; +typedef struct rocksdb_logger_t rocksdb_logger_t; +typedef struct rocksdb_mergeoperator_t rocksdb_mergeoperator_t; +typedef struct rocksdb_options_t rocksdb_options_t; +typedef struct rocksdb_compactoptions_t rocksdb_compactoptions_t; +typedef struct rocksdb_block_based_table_options_t + rocksdb_block_based_table_options_t; +typedef struct rocksdb_cuckoo_table_options_t rocksdb_cuckoo_table_options_t; +typedef struct rocksdb_randomfile_t rocksdb_randomfile_t; +typedef struct rocksdb_readoptions_t rocksdb_readoptions_t; +typedef struct rocksdb_seqfile_t rocksdb_seqfile_t; +typedef struct rocksdb_slicetransform_t rocksdb_slicetransform_t; +typedef struct rocksdb_snapshot_t rocksdb_snapshot_t; +typedef struct rocksdb_writablefile_t rocksdb_writablefile_t; +typedef struct rocksdb_writebatch_t rocksdb_writebatch_t; +typedef struct rocksdb_writebatch_wi_t rocksdb_writebatch_wi_t; +typedef struct rocksdb_writeoptions_t rocksdb_writeoptions_t; +typedef struct rocksdb_universal_compaction_options_t + rocksdb_universal_compaction_options_t; +typedef struct rocksdb_livefiles_t rocksdb_livefiles_t; +typedef struct rocksdb_column_family_handle_t rocksdb_column_family_handle_t; +typedef struct rocksdb_column_family_metadata_t + rocksdb_column_family_metadata_t; +typedef struct rocksdb_level_metadata_t rocksdb_level_metadata_t; +typedef struct rocksdb_sst_file_metadata_t rocksdb_sst_file_metadata_t; +typedef struct rocksdb_envoptions_t rocksdb_envoptions_t; +typedef struct rocksdb_ingestexternalfileoptions_t + rocksdb_ingestexternalfileoptions_t; +typedef struct rocksdb_sstfilewriter_t rocksdb_sstfilewriter_t; +typedef struct rocksdb_ratelimiter_t rocksdb_ratelimiter_t; +typedef struct rocksdb_perfcontext_t rocksdb_perfcontext_t; +typedef struct rocksdb_pinnableslice_t rocksdb_pinnableslice_t; 
+typedef struct rocksdb_transactiondb_options_t rocksdb_transactiondb_options_t; +typedef struct rocksdb_transactiondb_t rocksdb_transactiondb_t; +typedef struct rocksdb_transaction_options_t rocksdb_transaction_options_t; +typedef struct rocksdb_optimistictransactiondb_t + rocksdb_optimistictransactiondb_t; +typedef struct rocksdb_optimistictransaction_options_t + rocksdb_optimistictransaction_options_t; +typedef struct rocksdb_transaction_t rocksdb_transaction_t; +typedef struct rocksdb_checkpoint_t rocksdb_checkpoint_t; +typedef struct rocksdb_wal_iterator_t rocksdb_wal_iterator_t; +typedef struct rocksdb_wal_readoptions_t rocksdb_wal_readoptions_t; +typedef struct rocksdb_memory_consumers_t rocksdb_memory_consumers_t; +typedef struct rocksdb_memory_usage_t rocksdb_memory_usage_t; + +/* DB operations */ + +extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_open( + const rocksdb_options_t* options, const char* name, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_open_with_ttl( + const rocksdb_options_t* options, const char* name, int ttl, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_open_for_read_only( + const rocksdb_options_t* options, const char* name, + unsigned char error_if_wal_file_exists, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_open_as_secondary( + const rocksdb_options_t* options, const char* name, + const char* secondary_path, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_backup_engine_t* rocksdb_backup_engine_open( + const rocksdb_options_t* options, const char* path, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_backup_engine_t* +rocksdb_backup_engine_open_opts(const rocksdb_backup_engine_options_t* options, + rocksdb_env_t* env, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_create_new_backup( + rocksdb_backup_engine_t* be, rocksdb_t* db, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_create_new_backup_flush( + rocksdb_backup_engine_t* be, rocksdb_t* db, + unsigned char flush_before_backup, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_purge_old_backups( + rocksdb_backup_engine_t* be, uint32_t num_backups_to_keep, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_restore_options_t* +rocksdb_restore_options_create(void); +extern ROCKSDB_LIBRARY_API void rocksdb_restore_options_destroy( + rocksdb_restore_options_t* opt); +extern ROCKSDB_LIBRARY_API void rocksdb_restore_options_set_keep_log_files( + rocksdb_restore_options_t* opt, int v); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_verify_backup( + rocksdb_backup_engine_t* be, uint32_t backup_id, char** errptr); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backup_engine_restore_db_from_latest_backup( + rocksdb_backup_engine_t* be, const char* db_dir, const char* wal_dir, + const rocksdb_restore_options_t* restore_options, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_restore_db_from_backup( + rocksdb_backup_engine_t* be, const char* db_dir, const char* wal_dir, + const rocksdb_restore_options_t* restore_options, const uint32_t backup_id, + char** errptr); + +extern ROCKSDB_LIBRARY_API const rocksdb_backup_engine_info_t* +rocksdb_backup_engine_get_backup_info(rocksdb_backup_engine_t* be); + +extern ROCKSDB_LIBRARY_API int rocksdb_backup_engine_info_count( + const rocksdb_backup_engine_info_t* info); + +extern ROCKSDB_LIBRARY_API int64_t rocksdb_backup_engine_info_timestamp( + const rocksdb_backup_engine_info_t* info, int index); + 
+extern ROCKSDB_LIBRARY_API uint32_t rocksdb_backup_engine_info_backup_id( + const rocksdb_backup_engine_info_t* info, int index); + +extern ROCKSDB_LIBRARY_API uint64_t rocksdb_backup_engine_info_size( + const rocksdb_backup_engine_info_t* info, int index); + +extern ROCKSDB_LIBRARY_API uint32_t rocksdb_backup_engine_info_number_files( + const rocksdb_backup_engine_info_t* info, int index); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_info_destroy( + const rocksdb_backup_engine_info_t* info); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_close( + rocksdb_backup_engine_t* be); + +extern ROCKSDB_LIBRARY_API void rocksdb_put_with_ts( + rocksdb_t* db, const rocksdb_writeoptions_t* options, const char* key, + size_t keylen, const char* ts, size_t tslen, const char* val, size_t vallen, + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_put_cf_with_ts( + rocksdb_t* db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, const char* ts, size_t tslen, const char* val, size_t vallen, + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_delete_with_ts( + rocksdb_t* db, const rocksdb_writeoptions_t* options, const char* key, + size_t keylen, const char* ts, size_t tslen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_delete_cf_with_ts( + rocksdb_t* db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, const char* ts, size_t tslen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_singledelete( + rocksdb_t* db, const rocksdb_writeoptions_t* options, const char* key, + size_t keylen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_singledelete_cf( + rocksdb_t* db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_singledelete_with_ts( + rocksdb_t* db, const rocksdb_writeoptions_t* options, const char* key, + size_t keylen, const char* ts, size_t tslen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_singledelete_cf_with_ts( + rocksdb_t* db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, const char* ts, size_t tslen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_increase_full_history_ts_low( + rocksdb_t* db, rocksdb_column_family_handle_t* column_family, + const char* ts_low, size_t ts_lowlen, char** errptr); + +extern ROCKSDB_LIBRARY_API char* rocksdb_get_full_history_ts_low( + rocksdb_t* db, rocksdb_column_family_handle_t* column_family, + size_t* ts_lowlen, char** errptr); + +/* BackupEngineOptions */ + +extern ROCKSDB_LIBRARY_API rocksdb_backup_engine_options_t* +rocksdb_backup_engine_options_create(const char* backup_dir); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_options_set_backup_dir( + rocksdb_backup_engine_options_t* options, const char* backup_dir); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_options_set_env( + rocksdb_backup_engine_options_t* options, rocksdb_env_t* env); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backup_engine_options_set_share_table_files( + rocksdb_backup_engine_options_t* options, unsigned char val); + +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_backup_engine_options_get_share_table_files( + rocksdb_backup_engine_options_t* options); + +extern ROCKSDB_LIBRARY_API void 
rocksdb_backup_engine_options_set_sync( + rocksdb_backup_engine_options_t* options, unsigned char val); + +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_backup_engine_options_get_sync( + rocksdb_backup_engine_options_t* options); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backup_engine_options_set_destroy_old_data( + rocksdb_backup_engine_options_t* options, unsigned char val); + +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_backup_engine_options_get_destroy_old_data( + rocksdb_backup_engine_options_t* options); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backup_engine_options_set_backup_log_files( + rocksdb_backup_engine_options_t* options, unsigned char val); + +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_backup_engine_options_get_backup_log_files( + rocksdb_backup_engine_options_t* options); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backup_engine_options_set_backup_rate_limit( + rocksdb_backup_engine_options_t* options, uint64_t limit); + +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_backup_engine_options_get_backup_rate_limit( + rocksdb_backup_engine_options_t* options); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backup_engine_options_set_restore_rate_limit( + rocksdb_backup_engine_options_t* options, uint64_t limit); + +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_backup_engine_options_get_restore_rate_limit( + rocksdb_backup_engine_options_t* options); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backup_engine_options_set_max_background_operations( + rocksdb_backup_engine_options_t* options, int val); + +extern ROCKSDB_LIBRARY_API int +rocksdb_backup_engine_options_get_max_background_operations( + rocksdb_backup_engine_options_t* options); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backup_engine_options_set_callback_trigger_interval_size( + rocksdb_backup_engine_options_t* options, uint64_t size); + +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_backup_engine_options_get_callback_trigger_interval_size( + rocksdb_backup_engine_options_t* options); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backup_engine_options_set_max_valid_backups_to_open( + rocksdb_backup_engine_options_t* options, int val); + +extern ROCKSDB_LIBRARY_API int +rocksdb_backup_engine_options_get_max_valid_backups_to_open( + rocksdb_backup_engine_options_t* options); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backup_engine_options_set_share_files_with_checksum_naming( + rocksdb_backup_engine_options_t* options, int val); + +extern ROCKSDB_LIBRARY_API int +rocksdb_backup_engine_options_get_share_files_with_checksum_naming( + rocksdb_backup_engine_options_t* options); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_options_destroy( + rocksdb_backup_engine_options_t*); + +/* Checkpoint */ + +extern ROCKSDB_LIBRARY_API rocksdb_checkpoint_t* +rocksdb_checkpoint_object_create(rocksdb_t* db, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_checkpoint_create( + rocksdb_checkpoint_t* checkpoint, const char* checkpoint_dir, + uint64_t log_size_for_flush, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_checkpoint_object_destroy( + rocksdb_checkpoint_t* checkpoint); + +extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_open_and_trim_history( + const rocksdb_options_t* options, const char* name, int num_column_families, + const char* const* column_family_names, + const rocksdb_options_t* const* column_family_options, + rocksdb_column_family_handle_t** column_family_handles, char* trim_ts, + size_t trim_tslen, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_t* 
rocksdb_open_column_families( + const rocksdb_options_t* options, const char* name, int num_column_families, + const char* const* column_family_names, + const rocksdb_options_t* const* column_family_options, + rocksdb_column_family_handle_t** column_family_handles, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_open_column_families_with_ttl( + const rocksdb_options_t* options, const char* name, int num_column_families, + const char* const* column_family_names, + const rocksdb_options_t* const* column_family_options, + rocksdb_column_family_handle_t** column_family_handles, const int* ttls, + char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_t* +rocksdb_open_for_read_only_column_families( + const rocksdb_options_t* options, const char* name, int num_column_families, + const char* const* column_family_names, + const rocksdb_options_t* const* column_family_options, + rocksdb_column_family_handle_t** column_family_handles, + unsigned char error_if_wal_file_exists, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_open_as_secondary_column_families( + const rocksdb_options_t* options, const char* name, + const char* secondary_path, int num_column_families, + const char* const* column_family_names, + const rocksdb_options_t* const* column_family_options, + rocksdb_column_family_handle_t** column_family_handles, char** errptr); + +extern ROCKSDB_LIBRARY_API char** rocksdb_list_column_families( + const rocksdb_options_t* options, const char* name, size_t* lencf, + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_list_column_families_destroy( + char** list, size_t len); + +extern ROCKSDB_LIBRARY_API rocksdb_column_family_handle_t* +rocksdb_create_column_family(rocksdb_t* db, + const rocksdb_options_t* column_family_options, + const char* column_family_name, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_column_family_handle_t* +rocksdb_create_column_family_with_ttl( + rocksdb_t* db, const rocksdb_options_t* column_family_options, + const char* column_family_name, int ttl, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_drop_column_family( + rocksdb_t* db, rocksdb_column_family_handle_t* handle, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_column_family_handle_destroy( + rocksdb_column_family_handle_t*); + +extern ROCKSDB_LIBRARY_API uint32_t +rocksdb_column_family_handle_get_id(rocksdb_column_family_handle_t* handle); + +extern ROCKSDB_LIBRARY_API char* rocksdb_column_family_handle_get_name( + rocksdb_column_family_handle_t* handle, size_t* name_len); + +extern ROCKSDB_LIBRARY_API void rocksdb_close(rocksdb_t* db); + +extern ROCKSDB_LIBRARY_API void rocksdb_put( + rocksdb_t* db, const rocksdb_writeoptions_t* options, const char* key, + size_t keylen, const char* val, size_t vallen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_put_cf( + rocksdb_t* db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, const char* val, size_t vallen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_delete( + rocksdb_t* db, const rocksdb_writeoptions_t* options, const char* key, + size_t keylen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_delete_cf( + rocksdb_t* db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_delete_range_cf( + rocksdb_t* db, const rocksdb_writeoptions_t* options, + 
rocksdb_column_family_handle_t* column_family, const char* start_key, + size_t start_key_len, const char* end_key, size_t end_key_len, + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_merge( + rocksdb_t* db, const rocksdb_writeoptions_t* options, const char* key, + size_t keylen, const char* val, size_t vallen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_merge_cf( + rocksdb_t* db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, const char* val, size_t vallen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_write( + rocksdb_t* db, const rocksdb_writeoptions_t* options, + rocksdb_writebatch_t* batch, char** errptr); + +/* Returns NULL if not found. A malloc()ed array otherwise. + Stores the length of the array in *vallen. */ +extern ROCKSDB_LIBRARY_API char* rocksdb_get( + rocksdb_t* db, const rocksdb_readoptions_t* options, const char* key, + size_t keylen, size_t* vallen, char** errptr); + +extern ROCKSDB_LIBRARY_API char* rocksdb_get_with_ts( + rocksdb_t* db, const rocksdb_readoptions_t* options, const char* key, + size_t keylen, size_t* vallen, char** ts, size_t* tslen, char** errptr); + +extern ROCKSDB_LIBRARY_API char* rocksdb_get_cf( + rocksdb_t* db, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, size_t* vallen, char** errptr); + +extern ROCKSDB_LIBRARY_API char* rocksdb_get_cf_with_ts( + rocksdb_t* db, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, size_t* vallen, char** ts, size_t* tslen, char** errptr); + +// if values_list[i] == NULL and errs[i] == NULL, +// then we got status.IsNotFound(), which we will not return. +// all errors except status status.ok() and status.IsNotFound() are returned. +// +// errs, values_list and values_list_sizes must be num_keys in length, +// allocated by the caller. +// errs is a list of strings as opposed to the conventional one error, +// where errs[i] is the status for retrieval of keys_list[i]. +// each non-NULL errs entry is a malloc()ed, null terminated string. +// each non-NULL values_list entry is a malloc()ed array, with +// the length for each stored in values_list_sizes[i]. 
+extern ROCKSDB_LIBRARY_API void rocksdb_multi_get( + rocksdb_t* db, const rocksdb_readoptions_t* options, size_t num_keys, + const char* const* keys_list, const size_t* keys_list_sizes, + char** values_list, size_t* values_list_sizes, char** errs); + +extern ROCKSDB_LIBRARY_API void rocksdb_multi_get_with_ts( + rocksdb_t* db, const rocksdb_readoptions_t* options, size_t num_keys, + const char* const* keys_list, const size_t* keys_list_sizes, + char** values_list, size_t* values_list_sizes, char** timestamp_list, + size_t* timestamp_list_sizes, char** errs); + +extern ROCKSDB_LIBRARY_API void rocksdb_multi_get_cf( + rocksdb_t* db, const rocksdb_readoptions_t* options, + const rocksdb_column_family_handle_t* const* column_families, + size_t num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, char** values_list, + size_t* values_list_sizes, char** errs); + +extern ROCKSDB_LIBRARY_API void rocksdb_multi_get_cf_with_ts( + rocksdb_t* db, const rocksdb_readoptions_t* options, + const rocksdb_column_family_handle_t* const* column_families, + size_t num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, char** values_list, + size_t* values_list_sizes, char** timestamps_list, + size_t* timestamps_list_sizes, char** errs); + +// The MultiGet API that improves performance by batching operations +// in the read path for greater efficiency. Currently, only the block based +// table format with full filters are supported. Other table formats such +// as plain table, block based table with block based filters and +// partitioned indexes will still work, but will not get any performance +// benefits. +// +// Note that all the keys passed to this API are restricted to a single +// column family. +// +// Parameters - +// db - the RocksDB instance. +// options - ReadOptions +// column_family - ColumnFamilyHandle* that the keys belong to. All the keys +// passed to the API are restricted to a single column family +// num_keys - Number of keys to lookup +// keys_list - Pointer to C style array of keys with num_keys elements +// keys_list_sizes - Pointer to C style array of the size of corresponding key +// in key_list with num_keys elements. +// values - Pointer to C style array of PinnableSlices with num_keys elements +// statuses - Pointer to C style array of Status with num_keys elements +// sorted_input - If true, it means the input keys are already sorted by key +// order, so the MultiGet() API doesn't have to sort them +// again. If false, the keys will be copied and sorted +// internally by the API - the input array will not be +// modified +extern ROCKSDB_LIBRARY_API void rocksdb_batched_multi_get_cf( + rocksdb_t* db, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, size_t num_keys, + const char* const* keys_list, const size_t* keys_list_sizes, + rocksdb_pinnableslice_t** values, char** errs, const bool sorted_input); + +// The value is only allocated (using malloc) and returned if it is found and +// value_found isn't NULL. In that case the user is responsible for freeing it. +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_key_may_exist( + rocksdb_t* db, const rocksdb_readoptions_t* options, const char* key, + size_t key_len, char** value, size_t* val_len, const char* timestamp, + size_t timestamp_len, unsigned char* value_found); + +// The value is only allocated (using malloc) and returned if it is found and +// value_found isn't NULL. In that case the user is responsible for freeing it. 
+extern ROCKSDB_LIBRARY_API unsigned char rocksdb_key_may_exist_cf( + rocksdb_t* db, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t key_len, char** value, size_t* val_len, const char* timestamp, + size_t timestamp_len, unsigned char* value_found); + +extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* rocksdb_create_iterator( + rocksdb_t* db, const rocksdb_readoptions_t* options); + +extern ROCKSDB_LIBRARY_API rocksdb_wal_iterator_t* rocksdb_get_updates_since( + rocksdb_t* db, uint64_t seq_number, + const rocksdb_wal_readoptions_t* options, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* rocksdb_create_iterator_cf( + rocksdb_t* db, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family); + +extern ROCKSDB_LIBRARY_API void rocksdb_create_iterators( + rocksdb_t* db, rocksdb_readoptions_t* opts, + rocksdb_column_family_handle_t** column_families, + rocksdb_iterator_t** iterators, size_t size, char** errptr); + +extern ROCKSDB_LIBRARY_API const rocksdb_snapshot_t* rocksdb_create_snapshot( + rocksdb_t* db); + +extern ROCKSDB_LIBRARY_API void rocksdb_release_snapshot( + rocksdb_t* db, const rocksdb_snapshot_t* snapshot); + +/* Returns NULL if property name is unknown. + Else returns a pointer to a malloc()-ed null-terminated value. */ +extern ROCKSDB_LIBRARY_API char* rocksdb_property_value(rocksdb_t* db, + const char* propname); +/* returns 0 on success, -1 otherwise */ +extern ROCKSDB_LIBRARY_API int rocksdb_property_int(rocksdb_t* db, + const char* propname, + uint64_t* out_val); + +/* returns 0 on success, -1 otherwise */ +extern ROCKSDB_LIBRARY_API int rocksdb_property_int_cf( + rocksdb_t* db, rocksdb_column_family_handle_t* column_family, + const char* propname, uint64_t* out_val); + +extern ROCKSDB_LIBRARY_API char* rocksdb_property_value_cf( + rocksdb_t* db, rocksdb_column_family_handle_t* column_family, + const char* propname); + +extern ROCKSDB_LIBRARY_API void rocksdb_approximate_sizes( + rocksdb_t* db, int num_ranges, const char* const* range_start_key, + const size_t* range_start_key_len, const char* const* range_limit_key, + const size_t* range_limit_key_len, uint64_t* sizes, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_approximate_sizes_cf( + rocksdb_t* db, rocksdb_column_family_handle_t* column_family, + int num_ranges, const char* const* range_start_key, + const size_t* range_start_key_len, const char* const* range_limit_key, + const size_t* range_limit_key_len, uint64_t* sizes, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_compact_range(rocksdb_t* db, + const char* start_key, + size_t start_key_len, + const char* limit_key, + size_t limit_key_len); + +extern ROCKSDB_LIBRARY_API void rocksdb_compact_range_cf( + rocksdb_t* db, rocksdb_column_family_handle_t* column_family, + const char* start_key, size_t start_key_len, const char* limit_key, + size_t limit_key_len); + +extern ROCKSDB_LIBRARY_API void rocksdb_suggest_compact_range( + rocksdb_t* db, const char* start_key, size_t start_key_len, + const char* limit_key, size_t limit_key_len, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_suggest_compact_range_cf( + rocksdb_t* db, rocksdb_column_family_handle_t* column_family, + const char* start_key, size_t start_key_len, const char* limit_key, + size_t limit_key_len, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_compact_range_opt( + rocksdb_t* db, rocksdb_compactoptions_t* opt, const char* start_key, + size_t start_key_len, 
const char* limit_key, size_t limit_key_len); + +extern ROCKSDB_LIBRARY_API void rocksdb_compact_range_cf_opt( + rocksdb_t* db, rocksdb_column_family_handle_t* column_family, + rocksdb_compactoptions_t* opt, const char* start_key, size_t start_key_len, + const char* limit_key, size_t limit_key_len); + +extern ROCKSDB_LIBRARY_API void rocksdb_delete_file(rocksdb_t* db, + const char* name); + +extern ROCKSDB_LIBRARY_API const rocksdb_livefiles_t* rocksdb_livefiles( + rocksdb_t* db); + +extern ROCKSDB_LIBRARY_API void rocksdb_flush( + rocksdb_t* db, const rocksdb_flushoptions_t* options, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_flush_cf( + rocksdb_t* db, const rocksdb_flushoptions_t* options, + rocksdb_column_family_handle_t* column_family, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_flush_cfs( + rocksdb_t* db, const rocksdb_flushoptions_t* options, + rocksdb_column_family_handle_t** column_family, int num_column_families, + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_flush_wal(rocksdb_t* db, + unsigned char sync, + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_disable_file_deletions(rocksdb_t* db, + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_enable_file_deletions( + rocksdb_t* db, unsigned char force, char** errptr); + +/* Management operations */ + +extern ROCKSDB_LIBRARY_API void rocksdb_destroy_db( + const rocksdb_options_t* options, const char* name, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_repair_db( + const rocksdb_options_t* options, const char* name, char** errptr); + +/* Iterator */ + +extern ROCKSDB_LIBRARY_API void rocksdb_iter_destroy(rocksdb_iterator_t*); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_iter_valid( + const rocksdb_iterator_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_iter_seek_to_first(rocksdb_iterator_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_iter_seek_to_last(rocksdb_iterator_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_iter_seek(rocksdb_iterator_t*, + const char* k, size_t klen); +extern ROCKSDB_LIBRARY_API void rocksdb_iter_seek_for_prev(rocksdb_iterator_t*, + const char* k, + size_t klen); +extern ROCKSDB_LIBRARY_API void rocksdb_iter_next(rocksdb_iterator_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_iter_prev(rocksdb_iterator_t*); +extern ROCKSDB_LIBRARY_API const char* rocksdb_iter_key( + const rocksdb_iterator_t*, size_t* klen); +extern ROCKSDB_LIBRARY_API const char* rocksdb_iter_value( + const rocksdb_iterator_t*, size_t* vlen); +extern ROCKSDB_LIBRARY_API const char* rocksdb_iter_timestamp( + const rocksdb_iterator_t*, size_t* tslen); +extern ROCKSDB_LIBRARY_API void rocksdb_iter_get_error( + const rocksdb_iterator_t*, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_wal_iter_next( + rocksdb_wal_iterator_t* iter); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_wal_iter_valid( + const rocksdb_wal_iterator_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_wal_iter_status( + const rocksdb_wal_iterator_t* iter, char** errptr); +extern ROCKSDB_LIBRARY_API rocksdb_writebatch_t* rocksdb_wal_iter_get_batch( + const rocksdb_wal_iterator_t* iter, uint64_t* seq); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_get_latest_sequence_number(rocksdb_t* db); +extern ROCKSDB_LIBRARY_API void rocksdb_wal_iter_destroy( + const rocksdb_wal_iterator_t* iter); + +/* Write batch */ + +extern ROCKSDB_LIBRARY_API rocksdb_writebatch_t* rocksdb_writebatch_create( + void); +extern ROCKSDB_LIBRARY_API rocksdb_writebatch_t* rocksdb_writebatch_create_from( + const char* rep, 
size_t size); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_destroy( + rocksdb_writebatch_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_clear(rocksdb_writebatch_t*); +extern ROCKSDB_LIBRARY_API int rocksdb_writebatch_count(rocksdb_writebatch_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_put(rocksdb_writebatch_t*, + const char* key, + size_t klen, + const char* val, + size_t vlen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_put_cf( + rocksdb_writebatch_t*, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, const char* val, size_t vlen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_put_cf_with_ts( + rocksdb_writebatch_t*, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, const char* ts, size_t tslen, const char* val, + size_t vlen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_putv( + rocksdb_writebatch_t* b, int num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, int num_values, + const char* const* values_list, const size_t* values_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_putv_cf( + rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family, + int num_keys, const char* const* keys_list, const size_t* keys_list_sizes, + int num_values, const char* const* values_list, + const size_t* values_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_merge(rocksdb_writebatch_t*, + const char* key, + size_t klen, + const char* val, + size_t vlen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_merge_cf( + rocksdb_writebatch_t*, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, const char* val, size_t vlen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_mergev( + rocksdb_writebatch_t* b, int num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, int num_values, + const char* const* values_list, const size_t* values_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_mergev_cf( + rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family, + int num_keys, const char* const* keys_list, const size_t* keys_list_sizes, + int num_values, const char* const* values_list, + const size_t* values_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_delete(rocksdb_writebatch_t*, + const char* key, + size_t klen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_singledelete( + rocksdb_writebatch_t* b, const char* key, size_t klen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_delete_cf( + rocksdb_writebatch_t*, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_delete_cf_with_ts( + rocksdb_writebatch_t*, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, const char* ts, size_t tslen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_singledelete_cf( + rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_singledelete_cf_with_ts( + rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, const char* ts, size_t tslen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_deletev( + rocksdb_writebatch_t* b, int num_keys, const char* const* keys_list, + const size_t* keys_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_deletev_cf( + rocksdb_writebatch_t* b, 
rocksdb_column_family_handle_t* column_family, + int num_keys, const char* const* keys_list, const size_t* keys_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_delete_range( + rocksdb_writebatch_t* b, const char* start_key, size_t start_key_len, + const char* end_key, size_t end_key_len); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_delete_range_cf( + rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family, + const char* start_key, size_t start_key_len, const char* end_key, + size_t end_key_len); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_delete_rangev( + rocksdb_writebatch_t* b, int num_keys, const char* const* start_keys_list, + const size_t* start_keys_list_sizes, const char* const* end_keys_list, + const size_t* end_keys_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_delete_rangev_cf( + rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family, + int num_keys, const char* const* start_keys_list, + const size_t* start_keys_list_sizes, const char* const* end_keys_list, + const size_t* end_keys_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_put_log_data( + rocksdb_writebatch_t*, const char* blob, size_t len); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_iterate( + rocksdb_writebatch_t*, void* state, + void (*put)(void*, const char* k, size_t klen, const char* v, size_t vlen), + void (*deleted)(void*, const char* k, size_t klen)); +extern ROCKSDB_LIBRARY_API const char* rocksdb_writebatch_data( + rocksdb_writebatch_t*, size_t* size); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_set_save_point( + rocksdb_writebatch_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_rollback_to_save_point( + rocksdb_writebatch_t*, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_pop_save_point( + rocksdb_writebatch_t*, char** errptr); + +/* Write batch with index */ + +extern ROCKSDB_LIBRARY_API rocksdb_writebatch_wi_t* +rocksdb_writebatch_wi_create(size_t reserved_bytes, + unsigned char overwrite_keys); +extern ROCKSDB_LIBRARY_API rocksdb_writebatch_wi_t* +rocksdb_writebatch_wi_create_from(const char* rep, size_t size); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_destroy( + rocksdb_writebatch_wi_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_clear( + rocksdb_writebatch_wi_t*); +extern ROCKSDB_LIBRARY_API int rocksdb_writebatch_wi_count( + rocksdb_writebatch_wi_t* b); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_put( + rocksdb_writebatch_wi_t*, const char* key, size_t klen, const char* val, + size_t vlen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_put_cf( + rocksdb_writebatch_wi_t*, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, const char* val, size_t vlen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_putv( + rocksdb_writebatch_wi_t* b, int num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, int num_values, + const char* const* values_list, const size_t* values_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_putv_cf( + rocksdb_writebatch_wi_t* b, rocksdb_column_family_handle_t* column_family, + int num_keys, const char* const* keys_list, const size_t* keys_list_sizes, + int num_values, const char* const* values_list, + const size_t* values_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_merge( + rocksdb_writebatch_wi_t*, const char* key, size_t klen, const char* val, + size_t vlen); +extern ROCKSDB_LIBRARY_API void 
rocksdb_writebatch_wi_merge_cf( + rocksdb_writebatch_wi_t*, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, const char* val, size_t vlen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_mergev( + rocksdb_writebatch_wi_t* b, int num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, int num_values, + const char* const* values_list, const size_t* values_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_mergev_cf( + rocksdb_writebatch_wi_t* b, rocksdb_column_family_handle_t* column_family, + int num_keys, const char* const* keys_list, const size_t* keys_list_sizes, + int num_values, const char* const* values_list, + const size_t* values_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_delete( + rocksdb_writebatch_wi_t*, const char* key, size_t klen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_singledelete( + rocksdb_writebatch_wi_t*, const char* key, size_t klen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_delete_cf( + rocksdb_writebatch_wi_t*, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_singledelete_cf( + rocksdb_writebatch_wi_t*, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_deletev( + rocksdb_writebatch_wi_t* b, int num_keys, const char* const* keys_list, + const size_t* keys_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_deletev_cf( + rocksdb_writebatch_wi_t* b, rocksdb_column_family_handle_t* column_family, + int num_keys, const char* const* keys_list, const size_t* keys_list_sizes); +// DO NOT USE - rocksdb_writebatch_wi_delete_range is not yet supported +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_delete_range( + rocksdb_writebatch_wi_t* b, const char* start_key, size_t start_key_len, + const char* end_key, size_t end_key_len); +// DO NOT USE - rocksdb_writebatch_wi_delete_range_cf is not yet supported +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_delete_range_cf( + rocksdb_writebatch_wi_t* b, rocksdb_column_family_handle_t* column_family, + const char* start_key, size_t start_key_len, const char* end_key, + size_t end_key_len); +// DO NOT USE - rocksdb_writebatch_wi_delete_rangev is not yet supported +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_delete_rangev( + rocksdb_writebatch_wi_t* b, int num_keys, + const char* const* start_keys_list, const size_t* start_keys_list_sizes, + const char* const* end_keys_list, const size_t* end_keys_list_sizes); +// DO NOT USE - rocksdb_writebatch_wi_delete_rangev_cf is not yet supported +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_delete_rangev_cf( + rocksdb_writebatch_wi_t* b, rocksdb_column_family_handle_t* column_family, + int num_keys, const char* const* start_keys_list, + const size_t* start_keys_list_sizes, const char* const* end_keys_list, + const size_t* end_keys_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_put_log_data( + rocksdb_writebatch_wi_t*, const char* blob, size_t len); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_iterate( + rocksdb_writebatch_wi_t* b, void* state, + void (*put)(void*, const char* k, size_t klen, const char* v, size_t vlen), + void (*deleted)(void*, const char* k, size_t klen)); +extern ROCKSDB_LIBRARY_API const char* rocksdb_writebatch_wi_data( + rocksdb_writebatch_wi_t* b, size_t* size); +extern ROCKSDB_LIBRARY_API void 
rocksdb_writebatch_wi_set_save_point( + rocksdb_writebatch_wi_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_rollback_to_save_point( + rocksdb_writebatch_wi_t*, char** errptr); +extern ROCKSDB_LIBRARY_API char* rocksdb_writebatch_wi_get_from_batch( + rocksdb_writebatch_wi_t* wbwi, const rocksdb_options_t* options, + const char* key, size_t keylen, size_t* vallen, char** errptr); +extern ROCKSDB_LIBRARY_API char* rocksdb_writebatch_wi_get_from_batch_cf( + rocksdb_writebatch_wi_t* wbwi, const rocksdb_options_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, size_t* vallen, char** errptr); +extern ROCKSDB_LIBRARY_API char* rocksdb_writebatch_wi_get_from_batch_and_db( + rocksdb_writebatch_wi_t* wbwi, rocksdb_t* db, + const rocksdb_readoptions_t* options, const char* key, size_t keylen, + size_t* vallen, char** errptr); +extern ROCKSDB_LIBRARY_API char* rocksdb_writebatch_wi_get_from_batch_and_db_cf( + rocksdb_writebatch_wi_t* wbwi, rocksdb_t* db, + const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, size_t* vallen, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_write_writebatch_wi( + rocksdb_t* db, const rocksdb_writeoptions_t* options, + rocksdb_writebatch_wi_t* wbwi, char** errptr); +extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* +rocksdb_writebatch_wi_create_iterator_with_base( + rocksdb_writebatch_wi_t* wbwi, rocksdb_iterator_t* base_iterator); +extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* +rocksdb_writebatch_wi_create_iterator_with_base_cf( + rocksdb_writebatch_wi_t* wbwi, rocksdb_iterator_t* base_iterator, + rocksdb_column_family_handle_t* cf); + +/* Options utils */ + +// Load the latest rocksdb options from the specified db_path. +// +// On success, num_column_families will be updated with a non-zero +// number indicating the number of column families. +// The returned db_options, column_family_names, and column_family_options +// should be released via rocksdb_load_latest_options_destroy(). +// +// On error, a non-null errptr that includes the error message will be +// returned. db_options, column_family_names, and column_family_options +// will be set to NULL. 
+extern ROCKSDB_LIBRARY_API void rocksdb_load_latest_options( + const char* db_path, rocksdb_env_t* env, bool ignore_unknown_options, + rocksdb_cache_t* cache, rocksdb_options_t** db_options, + size_t* num_column_families, char*** column_family_names, + rocksdb_options_t*** column_family_options, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_load_latest_options_destroy( + rocksdb_options_t* db_options, char** list_column_family_names, + rocksdb_options_t** list_column_family_options, size_t len); + +/* Block based table options */ + +extern ROCKSDB_LIBRARY_API rocksdb_block_based_table_options_t* +rocksdb_block_based_options_create(void); +extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_destroy( + rocksdb_block_based_table_options_t* options); +extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_checksum( + rocksdb_block_based_table_options_t*, char); +extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_block_size( + rocksdb_block_based_table_options_t* options, size_t block_size); +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_block_size_deviation( + rocksdb_block_based_table_options_t* options, int block_size_deviation); +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_block_restart_interval( + rocksdb_block_based_table_options_t* options, int block_restart_interval); +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_index_block_restart_interval( + rocksdb_block_based_table_options_t* options, + int index_block_restart_interval); +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_metadata_block_size( + rocksdb_block_based_table_options_t* options, uint64_t metadata_block_size); +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_partition_filters( + rocksdb_block_based_table_options_t* options, + unsigned char partition_filters); +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_optimize_filters_for_memory( + rocksdb_block_based_table_options_t* options, + unsigned char optimize_filters_for_memory); +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_use_delta_encoding( + rocksdb_block_based_table_options_t* options, + unsigned char use_delta_encoding); +extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_filter_policy( + rocksdb_block_based_table_options_t* options, + rocksdb_filterpolicy_t* filter_policy); +extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_no_block_cache( + rocksdb_block_based_table_options_t* options, unsigned char no_block_cache); +extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_block_cache( + rocksdb_block_based_table_options_t* options, rocksdb_cache_t* block_cache); +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_whole_key_filtering( + rocksdb_block_based_table_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_format_version( + rocksdb_block_based_table_options_t*, int); +enum { + rocksdb_block_based_table_index_type_binary_search = 0, + rocksdb_block_based_table_index_type_hash_search = 1, + rocksdb_block_based_table_index_type_two_level_index_search = 2, +}; +extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_index_type( + rocksdb_block_based_table_options_t*, int); // uses one of the above enums +enum { + rocksdb_block_based_table_data_block_index_type_binary_search = 0, + rocksdb_block_based_table_data_block_index_type_binary_search_and_hash = 1, +}; +extern ROCKSDB_LIBRARY_API void 
+rocksdb_block_based_options_set_data_block_index_type( + rocksdb_block_based_table_options_t*, int); // uses one of the above enums +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_data_block_hash_ratio( + rocksdb_block_based_table_options_t* options, double v); +// rocksdb_block_based_options_set_hash_index_allow_collision() +// is removed since BlockBasedTableOptions.hash_index_allow_collision() +// is removed +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_cache_index_and_filter_blocks( + rocksdb_block_based_table_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_cache_index_and_filter_blocks_with_high_priority( + rocksdb_block_based_table_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_pin_l0_filter_and_index_blocks_in_cache( + rocksdb_block_based_table_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_pin_top_level_index_and_filter( + rocksdb_block_based_table_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_block_based_table_factory( + rocksdb_options_t* opt, rocksdb_block_based_table_options_t* table_options); + +/* Cuckoo table options */ + +extern ROCKSDB_LIBRARY_API rocksdb_cuckoo_table_options_t* +rocksdb_cuckoo_options_create(void); +extern ROCKSDB_LIBRARY_API void rocksdb_cuckoo_options_destroy( + rocksdb_cuckoo_table_options_t* options); +extern ROCKSDB_LIBRARY_API void rocksdb_cuckoo_options_set_hash_ratio( + rocksdb_cuckoo_table_options_t* options, double v); +extern ROCKSDB_LIBRARY_API void rocksdb_cuckoo_options_set_max_search_depth( + rocksdb_cuckoo_table_options_t* options, uint32_t v); +extern ROCKSDB_LIBRARY_API void rocksdb_cuckoo_options_set_cuckoo_block_size( + rocksdb_cuckoo_table_options_t* options, uint32_t v); +extern ROCKSDB_LIBRARY_API void +rocksdb_cuckoo_options_set_identity_as_first_hash( + rocksdb_cuckoo_table_options_t* options, unsigned char v); +extern ROCKSDB_LIBRARY_API void rocksdb_cuckoo_options_set_use_module_hash( + rocksdb_cuckoo_table_options_t* options, unsigned char v); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_cuckoo_table_factory( + rocksdb_options_t* opt, rocksdb_cuckoo_table_options_t* table_options); + +/* Options */ +extern ROCKSDB_LIBRARY_API void rocksdb_set_options(rocksdb_t* db, int count, + const char* const keys[], + const char* const values[], + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_set_options_cf( + rocksdb_t* db, rocksdb_column_family_handle_t* handle, int count, + const char* const keys[], const char* const values[], char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_options_t* rocksdb_options_create(void); +extern ROCKSDB_LIBRARY_API void rocksdb_options_destroy(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API rocksdb_options_t* rocksdb_options_create_copy( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_increase_parallelism( + rocksdb_options_t* opt, int total_threads); +extern ROCKSDB_LIBRARY_API void rocksdb_options_optimize_for_point_lookup( + rocksdb_options_t* opt, uint64_t block_cache_size_mb); +extern ROCKSDB_LIBRARY_API void rocksdb_options_optimize_level_style_compaction( + rocksdb_options_t* opt, uint64_t memtable_memory_budget); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_optimize_universal_style_compaction( + rocksdb_options_t* opt, uint64_t memtable_memory_budget); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_allow_ingest_behind( + 
rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_allow_ingest_behind(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_compaction_filter( + rocksdb_options_t*, rocksdb_compactionfilter_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_compaction_filter_factory( + rocksdb_options_t*, rocksdb_compactionfilterfactory_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_compaction_readahead_size( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_compaction_readahead_size(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_comparator( + rocksdb_options_t*, rocksdb_comparator_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_merge_operator( + rocksdb_options_t*, rocksdb_mergeoperator_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_uint64add_merge_operator( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_compression_per_level( + rocksdb_options_t* opt, const int* level_values, size_t num_levels); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_create_if_missing( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_create_if_missing( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_create_missing_column_families(rocksdb_options_t*, + unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_create_missing_column_families(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_error_if_exists( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_error_if_exists( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_paranoid_checks( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_paranoid_checks( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_db_paths( + rocksdb_options_t*, const rocksdb_dbpath_t** path_values, size_t num_paths); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_env(rocksdb_options_t*, + rocksdb_env_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_info_log(rocksdb_options_t*, + rocksdb_logger_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_info_log_level( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_info_log_level( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_write_buffer_size( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_write_buffer_size(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_db_write_buffer_size( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_db_write_buffer_size(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_open_files( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_max_open_files( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_file_opening_threads( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_max_file_opening_threads( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_total_wal_size( + rocksdb_options_t* opt, uint64_t n); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_max_total_wal_size(rocksdb_options_t* opt); +extern ROCKSDB_LIBRARY_API void 
rocksdb_options_set_compression_options( + rocksdb_options_t*, int, int, int, int); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_compression_options_zstd_max_train_bytes(rocksdb_options_t*, + int); +extern ROCKSDB_LIBRARY_API int +rocksdb_options_get_compression_options_zstd_max_train_bytes( + rocksdb_options_t* opt); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_compression_options_use_zstd_dict_trainer( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_compression_options_use_zstd_dict_trainer( + rocksdb_options_t* opt); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_compression_options_parallel_threads(rocksdb_options_t*, + int); +extern ROCKSDB_LIBRARY_API int +rocksdb_options_get_compression_options_parallel_threads( + rocksdb_options_t* opt); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_compression_options_max_dict_buffer_bytes( + rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_compression_options_max_dict_buffer_bytes( + rocksdb_options_t* opt); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_bottommost_compression_options(rocksdb_options_t*, int, int, + int, int, unsigned char); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_bottommost_compression_options_zstd_max_train_bytes( + rocksdb_options_t*, int, unsigned char); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_bottommost_compression_options_use_zstd_dict_trainer( + rocksdb_options_t*, unsigned char, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_bottommost_compression_options_use_zstd_dict_trainer( + rocksdb_options_t* opt); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_bottommost_compression_options_max_dict_buffer_bytes( + rocksdb_options_t*, uint64_t, unsigned char); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_prefix_extractor( + rocksdb_options_t*, rocksdb_slicetransform_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_num_levels( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_num_levels( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_level0_file_num_compaction_trigger(rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_options_get_level0_file_num_compaction_trigger(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_level0_slowdown_writes_trigger(rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_options_get_level0_slowdown_writes_trigger(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_level0_stop_writes_trigger( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_level0_stop_writes_trigger( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_target_file_size_base( + rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_target_file_size_base(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_target_file_size_multiplier( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_target_file_size_multiplier( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_bytes_for_level_base( + rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_max_bytes_for_level_base(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_level_compaction_dynamic_level_bytes(rocksdb_options_t*, + 
unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_level_compaction_dynamic_level_bytes(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_max_bytes_for_level_multiplier(rocksdb_options_t*, double); +extern ROCKSDB_LIBRARY_API double +rocksdb_options_get_max_bytes_for_level_multiplier(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_max_bytes_for_level_multiplier_additional( + rocksdb_options_t*, int* level_values, size_t num_levels); +extern ROCKSDB_LIBRARY_API void rocksdb_options_enable_statistics( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_skip_stats_update_on_db_open(rocksdb_options_t* opt, + unsigned char val); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_skip_stats_update_on_db_open(rocksdb_options_t* opt); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_skip_checking_sst_file_sizes_on_db_open( + rocksdb_options_t* opt, unsigned char val); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_skip_checking_sst_file_sizes_on_db_open( + rocksdb_options_t* opt); + +/* Blob Options Settings */ +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_enable_blob_files( + rocksdb_options_t* opt, unsigned char val); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_enable_blob_files( + rocksdb_options_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_min_blob_size( + rocksdb_options_t* opt, uint64_t val); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_min_blob_size(rocksdb_options_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_blob_file_size( + rocksdb_options_t* opt, uint64_t val); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_blob_file_size(rocksdb_options_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_blob_compression_type( + rocksdb_options_t* opt, int val); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_blob_compression_type( + rocksdb_options_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_enable_blob_gc( + rocksdb_options_t* opt, unsigned char val); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_enable_blob_gc( + rocksdb_options_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_blob_gc_age_cutoff( + rocksdb_options_t* opt, double val); +extern ROCKSDB_LIBRARY_API double rocksdb_options_get_blob_gc_age_cutoff( + rocksdb_options_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_blob_gc_force_threshold( + rocksdb_options_t* opt, double val); +extern ROCKSDB_LIBRARY_API double rocksdb_options_get_blob_gc_force_threshold( + rocksdb_options_t* opt); + +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_blob_compaction_readahead_size(rocksdb_options_t* opt, + uint64_t val); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_blob_compaction_readahead_size(rocksdb_options_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_blob_file_starting_level( + rocksdb_options_t* opt, int val); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_blob_file_starting_level( + rocksdb_options_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_blob_cache( + rocksdb_options_t* opt, rocksdb_cache_t* blob_cache); + +enum { + rocksdb_prepopulate_blob_disable = 0, + rocksdb_prepopulate_blob_flush_only = 1 +}; + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_prepopulate_blob_cache( + rocksdb_options_t* opt, int val); + +extern ROCKSDB_LIBRARY_API int 
rocksdb_options_get_prepopulate_blob_cache( + rocksdb_options_t* opt); + +/* returns a pointer to a malloc()-ed, null terminated string */ +extern ROCKSDB_LIBRARY_API char* rocksdb_options_statistics_get_string( + rocksdb_options_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_write_buffer_number( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_max_write_buffer_number( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_min_write_buffer_number_to_merge(rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_options_get_min_write_buffer_number_to_merge(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_max_write_buffer_number_to_maintain(rocksdb_options_t*, + int); +extern ROCKSDB_LIBRARY_API int +rocksdb_options_get_max_write_buffer_number_to_maintain(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_max_write_buffer_size_to_maintain(rocksdb_options_t*, + int64_t); +extern ROCKSDB_LIBRARY_API int64_t +rocksdb_options_get_max_write_buffer_size_to_maintain(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_enable_pipelined_write( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_enable_pipelined_write(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_unordered_write( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_unordered_write( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_subcompactions( + rocksdb_options_t*, uint32_t); +extern ROCKSDB_LIBRARY_API uint32_t +rocksdb_options_get_max_subcompactions(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_background_jobs( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_max_background_jobs( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_background_compactions( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_max_background_compactions( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_background_flushes( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_max_background_flushes( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_log_file_size( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_max_log_file_size(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_log_file_time_to_roll( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_log_file_time_to_roll(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_keep_log_file_num( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_keep_log_file_num(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_recycle_log_file_num( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_recycle_log_file_num(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_soft_pending_compaction_bytes_limit(rocksdb_options_t* opt, + size_t v); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_soft_pending_compaction_bytes_limit(rocksdb_options_t* opt); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_hard_pending_compaction_bytes_limit(rocksdb_options_t* opt, + size_t v); 
+extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_hard_pending_compaction_bytes_limit(rocksdb_options_t* opt); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_manifest_file_size( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_max_manifest_file_size(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_table_cache_numshardbits( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_table_cache_numshardbits( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_arena_block_size( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_arena_block_size(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_use_fsync( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_use_fsync( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_db_log_dir( + rocksdb_options_t*, const char*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_wal_dir(rocksdb_options_t*, + const char*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_WAL_ttl_seconds( + rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_WAL_ttl_seconds(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_WAL_size_limit_MB( + rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_WAL_size_limit_MB(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_manifest_preallocation_size( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_manifest_preallocation_size(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_allow_mmap_reads( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_allow_mmap_reads( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_allow_mmap_writes( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_allow_mmap_writes( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_use_direct_reads( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_use_direct_reads( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_use_direct_io_for_flush_and_compaction(rocksdb_options_t*, + unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_use_direct_io_for_flush_and_compaction(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_is_fd_close_on_exec( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_is_fd_close_on_exec(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_stats_dump_period_sec( + rocksdb_options_t*, unsigned int); +extern ROCKSDB_LIBRARY_API unsigned int +rocksdb_options_get_stats_dump_period_sec(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_stats_persist_period_sec( + rocksdb_options_t*, unsigned int); +extern ROCKSDB_LIBRARY_API unsigned int +rocksdb_options_get_stats_persist_period_sec(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_advise_random_on_open( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_advise_random_on_open(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void 
+rocksdb_options_set_access_hint_on_compaction_start(rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_options_get_access_hint_on_compaction_start(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_use_adaptive_mutex( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_use_adaptive_mutex( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_bytes_per_sync( + rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_bytes_per_sync(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_wal_bytes_per_sync( + rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_wal_bytes_per_sync(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_writable_file_max_buffer_size(rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_writable_file_max_buffer_size(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_allow_concurrent_memtable_write(rocksdb_options_t*, + unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_allow_concurrent_memtable_write(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_enable_write_thread_adaptive_yield(rocksdb_options_t*, + unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_enable_write_thread_adaptive_yield(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_max_sequential_skip_in_iterations(rocksdb_options_t*, + uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_max_sequential_skip_in_iterations(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_disable_auto_compactions( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_disable_auto_compactions(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_optimize_filters_for_hits( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_optimize_filters_for_hits(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_delete_obsolete_files_period_micros(rocksdb_options_t*, + uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_delete_obsolete_files_period_micros(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_prepare_for_bulk_load( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_memtable_vector_rep( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_memtable_prefix_bloom_size_ratio(rocksdb_options_t*, + double); +extern ROCKSDB_LIBRARY_API double +rocksdb_options_get_memtable_prefix_bloom_size_ratio(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_compaction_bytes( + rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_options_get_max_compaction_bytes(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_hash_skip_list_rep( + rocksdb_options_t*, size_t, int32_t, int32_t); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_hash_link_list_rep( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_plain_table_factory( + rocksdb_options_t*, uint32_t, int, double, size_t); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_min_level_to_compress( + rocksdb_options_t* opt, int level); + +extern ROCKSDB_LIBRARY_API void 
rocksdb_options_set_memtable_huge_page_size( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_memtable_huge_page_size(rocksdb_options_t*); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_successive_merges( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_max_successive_merges(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_bloom_locality( + rocksdb_options_t*, uint32_t); +extern ROCKSDB_LIBRARY_API uint32_t +rocksdb_options_get_bloom_locality(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_inplace_update_support( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_inplace_update_support(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_inplace_update_num_locks( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_options_get_inplace_update_num_locks(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_report_bg_io_stats( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_report_bg_io_stats( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_avoid_unnecessary_blocking_io(rocksdb_options_t*, + unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_options_get_avoid_unnecessary_blocking_io(rocksdb_options_t*); + +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_experimental_mempurge_threshold(rocksdb_options_t*, double); +extern ROCKSDB_LIBRARY_API double +rocksdb_options_get_experimental_mempurge_threshold(rocksdb_options_t*); + +enum { + rocksdb_tolerate_corrupted_tail_records_recovery = 0, + rocksdb_absolute_consistency_recovery = 1, + rocksdb_point_in_time_recovery = 2, + rocksdb_skip_any_corrupted_records_recovery = 3 +}; +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_wal_recovery_mode( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_wal_recovery_mode( + rocksdb_options_t*); + +enum { + rocksdb_no_compression = 0, + rocksdb_snappy_compression = 1, + rocksdb_zlib_compression = 2, + rocksdb_bz2_compression = 3, + rocksdb_lz4_compression = 4, + rocksdb_lz4hc_compression = 5, + rocksdb_xpress_compression = 6, + rocksdb_zstd_compression = 7 +}; +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_compression( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_compression( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_bottommost_compression( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_bottommost_compression( + rocksdb_options_t*); + +enum { + rocksdb_level_compaction = 0, + rocksdb_universal_compaction = 1, + rocksdb_fifo_compaction = 2 +}; +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_compaction_style( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_compaction_style( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_universal_compaction_options( + rocksdb_options_t*, rocksdb_universal_compaction_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_fifo_compaction_options( + rocksdb_options_t* opt, rocksdb_fifo_compaction_options_t* fifo); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_ratelimiter( + rocksdb_options_t* opt, rocksdb_ratelimiter_t* limiter); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_atomic_flush( + rocksdb_options_t* opt, 
unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_atomic_flush( + rocksdb_options_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_row_cache( + rocksdb_options_t* opt, rocksdb_cache_t* cache); + +extern ROCKSDB_LIBRARY_API void +rocksdb_options_add_compact_on_deletion_collector_factory( + rocksdb_options_t*, size_t window_size, size_t num_dels_trigger); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_manual_wal_flush( + rocksdb_options_t* opt, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_manual_wal_flush( + rocksdb_options_t* opt); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_wal_compression( + rocksdb_options_t* opt, int); +extern ROCKSDB_LIBRARY_API int rocksdb_options_get_wal_compression( + rocksdb_options_t* opt); + +/* RateLimiter */ +extern ROCKSDB_LIBRARY_API rocksdb_ratelimiter_t* rocksdb_ratelimiter_create( + int64_t rate_bytes_per_sec, int64_t refill_period_us, int32_t fairness); +extern ROCKSDB_LIBRARY_API void rocksdb_ratelimiter_destroy( + rocksdb_ratelimiter_t*); + +/* PerfContext */ +enum { + rocksdb_uninitialized = 0, + rocksdb_disable = 1, + rocksdb_enable_count = 2, + rocksdb_enable_time_except_for_mutex = 3, + rocksdb_enable_time = 4, + rocksdb_out_of_bounds = 5 +}; + +enum { + rocksdb_user_key_comparison_count = 0, + rocksdb_block_cache_hit_count, + rocksdb_block_read_count, + rocksdb_block_read_byte, + rocksdb_block_read_time, + rocksdb_block_checksum_time, + rocksdb_block_decompress_time, + rocksdb_get_read_bytes, + rocksdb_multiget_read_bytes, + rocksdb_iter_read_bytes, + rocksdb_internal_key_skipped_count, + rocksdb_internal_delete_skipped_count, + rocksdb_internal_recent_skipped_count, + rocksdb_internal_merge_count, + rocksdb_get_snapshot_time, + rocksdb_get_from_memtable_time, + rocksdb_get_from_memtable_count, + rocksdb_get_post_process_time, + rocksdb_get_from_output_files_time, + rocksdb_seek_on_memtable_time, + rocksdb_seek_on_memtable_count, + rocksdb_next_on_memtable_count, + rocksdb_prev_on_memtable_count, + rocksdb_seek_child_seek_time, + rocksdb_seek_child_seek_count, + rocksdb_seek_min_heap_time, + rocksdb_seek_max_heap_time, + rocksdb_seek_internal_seek_time, + rocksdb_find_next_user_entry_time, + rocksdb_write_wal_time, + rocksdb_write_memtable_time, + rocksdb_write_delay_time, + rocksdb_write_pre_and_post_process_time, + rocksdb_db_mutex_lock_nanos, + rocksdb_db_condition_wait_nanos, + rocksdb_merge_operator_time_nanos, + rocksdb_read_index_block_nanos, + rocksdb_read_filter_block_nanos, + rocksdb_new_table_block_iter_nanos, + rocksdb_new_table_iterator_nanos, + rocksdb_block_seek_nanos, + rocksdb_find_table_nanos, + rocksdb_bloom_memtable_hit_count, + rocksdb_bloom_memtable_miss_count, + rocksdb_bloom_sst_hit_count, + rocksdb_bloom_sst_miss_count, + rocksdb_key_lock_wait_time, + rocksdb_key_lock_wait_count, + rocksdb_env_new_sequential_file_nanos, + rocksdb_env_new_random_access_file_nanos, + rocksdb_env_new_writable_file_nanos, + rocksdb_env_reuse_writable_file_nanos, + rocksdb_env_new_random_rw_file_nanos, + rocksdb_env_new_directory_nanos, + rocksdb_env_file_exists_nanos, + rocksdb_env_get_children_nanos, + rocksdb_env_get_children_file_attributes_nanos, + rocksdb_env_delete_file_nanos, + rocksdb_env_create_dir_nanos, + rocksdb_env_create_dir_if_missing_nanos, + rocksdb_env_delete_dir_nanos, + rocksdb_env_get_file_size_nanos, + rocksdb_env_get_file_modification_time_nanos, + rocksdb_env_rename_file_nanos, + rocksdb_env_link_file_nanos, + 
rocksdb_env_lock_file_nanos, + rocksdb_env_unlock_file_nanos, + rocksdb_env_new_logger_nanos, + rocksdb_number_async_seek, + rocksdb_blob_cache_hit_count, + rocksdb_blob_read_count, + rocksdb_blob_read_byte, + rocksdb_blob_read_time, + rocksdb_blob_checksum_time, + rocksdb_blob_decompress_time, + rocksdb_internal_range_del_reseek_count, + rocksdb_total_metric_count = 78 +}; + +extern ROCKSDB_LIBRARY_API void rocksdb_set_perf_level(int); +extern ROCKSDB_LIBRARY_API rocksdb_perfcontext_t* rocksdb_perfcontext_create( + void); +extern ROCKSDB_LIBRARY_API void rocksdb_perfcontext_reset( + rocksdb_perfcontext_t* context); +extern ROCKSDB_LIBRARY_API char* rocksdb_perfcontext_report( + rocksdb_perfcontext_t* context, unsigned char exclude_zero_counters); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_perfcontext_metric(rocksdb_perfcontext_t* context, int metric); +extern ROCKSDB_LIBRARY_API void rocksdb_perfcontext_destroy( + rocksdb_perfcontext_t* context); + +/* Compaction Filter */ + +extern ROCKSDB_LIBRARY_API rocksdb_compactionfilter_t* +rocksdb_compactionfilter_create( + void* state, void (*destructor)(void*), + unsigned char (*filter)(void*, int level, const char* key, + size_t key_length, const char* existing_value, + size_t value_length, char** new_value, + size_t* new_value_length, + unsigned char* value_changed), + const char* (*name)(void*)); +extern ROCKSDB_LIBRARY_API void rocksdb_compactionfilter_set_ignore_snapshots( + rocksdb_compactionfilter_t*, unsigned char); +extern ROCKSDB_LIBRARY_API void rocksdb_compactionfilter_destroy( + rocksdb_compactionfilter_t*); + +/* Compaction Filter Context */ + +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_compactionfiltercontext_is_full_compaction( + rocksdb_compactionfiltercontext_t* context); + +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_compactionfiltercontext_is_manual_compaction( + rocksdb_compactionfiltercontext_t* context); + +/* Compaction Filter Factory */ + +extern ROCKSDB_LIBRARY_API rocksdb_compactionfilterfactory_t* +rocksdb_compactionfilterfactory_create( + void* state, void (*destructor)(void*), + rocksdb_compactionfilter_t* (*create_compaction_filter)( + void*, rocksdb_compactionfiltercontext_t* context), + const char* (*name)(void*)); +extern ROCKSDB_LIBRARY_API void rocksdb_compactionfilterfactory_destroy( + rocksdb_compactionfilterfactory_t*); + +/* Comparator */ + +extern ROCKSDB_LIBRARY_API rocksdb_comparator_t* rocksdb_comparator_create( + void* state, void (*destructor)(void*), + int (*compare)(void*, const char* a, size_t alen, const char* b, + size_t blen), + const char* (*name)(void*)); +extern ROCKSDB_LIBRARY_API void rocksdb_comparator_destroy( + rocksdb_comparator_t*); + +extern ROCKSDB_LIBRARY_API rocksdb_comparator_t* +rocksdb_comparator_with_ts_create( + void* state, void (*destructor)(void*), + int (*compare)(void*, const char* a, size_t alen, const char* b, + size_t blen), + int (*compare_ts)(void*, const char* a_ts, size_t a_tslen, const char* b_ts, + size_t b_tslen), + int (*compare_without_ts)(void*, const char* a, size_t alen, + unsigned char a_has_ts, const char* b, + size_t blen, unsigned char b_has_ts), + const char* (*name)(void*), size_t timestamp_size); + +/* Filter policy */ + +extern ROCKSDB_LIBRARY_API void rocksdb_filterpolicy_destroy( + rocksdb_filterpolicy_t*); + +extern ROCKSDB_LIBRARY_API rocksdb_filterpolicy_t* +rocksdb_filterpolicy_create_bloom(double bits_per_key); +extern ROCKSDB_LIBRARY_API rocksdb_filterpolicy_t* +rocksdb_filterpolicy_create_bloom_full(double bits_per_key); 
+extern ROCKSDB_LIBRARY_API rocksdb_filterpolicy_t* +rocksdb_filterpolicy_create_ribbon(double bloom_equivalent_bits_per_key); +extern ROCKSDB_LIBRARY_API rocksdb_filterpolicy_t* +rocksdb_filterpolicy_create_ribbon_hybrid(double bloom_equivalent_bits_per_key, + int bloom_before_level); + +/* Merge Operator */ + +extern ROCKSDB_LIBRARY_API rocksdb_mergeoperator_t* +rocksdb_mergeoperator_create( + void* state, void (*destructor)(void*), + char* (*full_merge)(void*, const char* key, size_t key_length, + const char* existing_value, + size_t existing_value_length, + const char* const* operands_list, + const size_t* operands_list_length, int num_operands, + unsigned char* success, size_t* new_value_length), + char* (*partial_merge)(void*, const char* key, size_t key_length, + const char* const* operands_list, + const size_t* operands_list_length, int num_operands, + unsigned char* success, size_t* new_value_length), + void (*delete_value)(void*, const char* value, size_t value_length), + const char* (*name)(void*)); +extern ROCKSDB_LIBRARY_API void rocksdb_mergeoperator_destroy( + rocksdb_mergeoperator_t*); + +/* Read options */ + +extern ROCKSDB_LIBRARY_API rocksdb_readoptions_t* rocksdb_readoptions_create( + void); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_destroy( + rocksdb_readoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_verify_checksums( + rocksdb_readoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_readoptions_get_verify_checksums(rocksdb_readoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_fill_cache( + rocksdb_readoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_readoptions_get_fill_cache( + rocksdb_readoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_snapshot( + rocksdb_readoptions_t*, const rocksdb_snapshot_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_iterate_upper_bound( + rocksdb_readoptions_t*, const char* key, size_t keylen); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_iterate_lower_bound( + rocksdb_readoptions_t*, const char* key, size_t keylen); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_read_tier( + rocksdb_readoptions_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_readoptions_get_read_tier( + rocksdb_readoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_tailing( + rocksdb_readoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_readoptions_get_tailing( + rocksdb_readoptions_t*); +// The functionality that this option controlled has been removed. 
+extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_managed( + rocksdb_readoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_readahead_size( + rocksdb_readoptions_t*, size_t); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_readoptions_get_readahead_size(rocksdb_readoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_prefix_same_as_start( + rocksdb_readoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_readoptions_get_prefix_same_as_start(rocksdb_readoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_pin_data( + rocksdb_readoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_readoptions_get_pin_data( + rocksdb_readoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_total_order_seek( + rocksdb_readoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_readoptions_get_total_order_seek(rocksdb_readoptions_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_readoptions_set_max_skippable_internal_keys(rocksdb_readoptions_t*, + uint64_t); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_readoptions_get_max_skippable_internal_keys(rocksdb_readoptions_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_readoptions_set_background_purge_on_iterator_cleanup( + rocksdb_readoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_readoptions_get_background_purge_on_iterator_cleanup( + rocksdb_readoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_ignore_range_deletions( + rocksdb_readoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_readoptions_get_ignore_range_deletions(rocksdb_readoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_deadline( + rocksdb_readoptions_t*, uint64_t microseconds); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_readoptions_get_deadline(rocksdb_readoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_io_timeout( + rocksdb_readoptions_t*, uint64_t microseconds); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_readoptions_get_io_timeout(rocksdb_readoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_async_io( + rocksdb_readoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_readoptions_get_async_io( + rocksdb_readoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_timestamp( + rocksdb_readoptions_t*, const char* ts, size_t tslen); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_iter_start_ts( + rocksdb_readoptions_t*, const char* ts, size_t tslen); + +/* Write options */ + +extern ROCKSDB_LIBRARY_API rocksdb_writeoptions_t* rocksdb_writeoptions_create( + void); +extern ROCKSDB_LIBRARY_API void rocksdb_writeoptions_destroy( + rocksdb_writeoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_writeoptions_set_sync( + rocksdb_writeoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_writeoptions_get_sync( + rocksdb_writeoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_writeoptions_disable_WAL( + rocksdb_writeoptions_t* opt, int disable); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_writeoptions_get_disable_WAL( + rocksdb_writeoptions_t* opt); +extern ROCKSDB_LIBRARY_API void +rocksdb_writeoptions_set_ignore_missing_column_families(rocksdb_writeoptions_t*, + unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_writeoptions_get_ignore_missing_column_families( + rocksdb_writeoptions_t*); +extern 
ROCKSDB_LIBRARY_API void rocksdb_writeoptions_set_no_slowdown( + rocksdb_writeoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_writeoptions_get_no_slowdown( + rocksdb_writeoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_writeoptions_set_low_pri( + rocksdb_writeoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_writeoptions_get_low_pri( + rocksdb_writeoptions_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_writeoptions_set_memtable_insert_hint_per_batch(rocksdb_writeoptions_t*, + unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_writeoptions_get_memtable_insert_hint_per_batch( + rocksdb_writeoptions_t*); + +/* Compact range options */ + +extern ROCKSDB_LIBRARY_API rocksdb_compactoptions_t* +rocksdb_compactoptions_create(void); +extern ROCKSDB_LIBRARY_API void rocksdb_compactoptions_destroy( + rocksdb_compactoptions_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_compactoptions_set_exclusive_manual_compaction( + rocksdb_compactoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_compactoptions_get_exclusive_manual_compaction( + rocksdb_compactoptions_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_compactoptions_set_bottommost_level_compaction( + rocksdb_compactoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_compactoptions_get_bottommost_level_compaction( + rocksdb_compactoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_compactoptions_set_change_level( + rocksdb_compactoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_compactoptions_get_change_level(rocksdb_compactoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_compactoptions_set_target_level( + rocksdb_compactoptions_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_compactoptions_get_target_level( + rocksdb_compactoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_compactoptions_set_full_history_ts_low( + rocksdb_compactoptions_t*, char* ts, size_t tslen); + +/* Flush options */ + +extern ROCKSDB_LIBRARY_API rocksdb_flushoptions_t* rocksdb_flushoptions_create( + void); +extern ROCKSDB_LIBRARY_API void rocksdb_flushoptions_destroy( + rocksdb_flushoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_flushoptions_set_wait( + rocksdb_flushoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_flushoptions_get_wait( + rocksdb_flushoptions_t*); + +/* Memory allocator */ + +extern ROCKSDB_LIBRARY_API rocksdb_memory_allocator_t* +rocksdb_jemalloc_nodump_allocator_create(char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_memory_allocator_destroy( + rocksdb_memory_allocator_t*); + +/* Cache */ + +extern ROCKSDB_LIBRARY_API rocksdb_lru_cache_options_t* +rocksdb_lru_cache_options_create(void); +extern ROCKSDB_LIBRARY_API void rocksdb_lru_cache_options_destroy( + rocksdb_lru_cache_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_lru_cache_options_set_capacity( + rocksdb_lru_cache_options_t*, size_t); +extern ROCKSDB_LIBRARY_API void rocksdb_lru_cache_options_set_num_shard_bits( + rocksdb_lru_cache_options_t*, int); +extern ROCKSDB_LIBRARY_API void rocksdb_lru_cache_options_set_memory_allocator( + rocksdb_lru_cache_options_t*, rocksdb_memory_allocator_t*); + +extern ROCKSDB_LIBRARY_API rocksdb_cache_t* rocksdb_cache_create_lru( + size_t capacity); +extern ROCKSDB_LIBRARY_API rocksdb_cache_t* +rocksdb_cache_create_lru_with_strict_capacity_limit(size_t capacity); +extern ROCKSDB_LIBRARY_API rocksdb_cache_t* rocksdb_cache_create_lru_opts( + 
rocksdb_lru_cache_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_cache_destroy(rocksdb_cache_t* cache); +extern ROCKSDB_LIBRARY_API void rocksdb_cache_disown_data( + rocksdb_cache_t* cache); +extern ROCKSDB_LIBRARY_API void rocksdb_cache_set_capacity( + rocksdb_cache_t* cache, size_t capacity); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_cache_get_capacity(rocksdb_cache_t* cache); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_cache_get_usage(rocksdb_cache_t* cache); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_cache_get_pinned_usage(rocksdb_cache_t* cache); + +/* HyperClockCache */ +extern ROCKSDB_LIBRARY_API rocksdb_hyper_clock_cache_options_t* +rocksdb_hyper_clock_cache_options_create(size_t capacity, + size_t estimated_entry_charge); +extern ROCKSDB_LIBRARY_API void rocksdb_hyper_clock_cache_options_destroy( + rocksdb_hyper_clock_cache_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_hyper_clock_cache_options_set_capacity( + rocksdb_hyper_clock_cache_options_t*, size_t); +extern ROCKSDB_LIBRARY_API void +rocksdb_hyper_clock_cache_options_set_estimated_entry_charge( + rocksdb_hyper_clock_cache_options_t*, size_t); +extern ROCKSDB_LIBRARY_API void +rocksdb_hyper_clock_cache_options_set_num_shard_bits( + rocksdb_hyper_clock_cache_options_t*, int); +extern ROCKSDB_LIBRARY_API void +rocksdb_hyper_clock_cache_options_set_memory_allocator( + rocksdb_hyper_clock_cache_options_t*, rocksdb_memory_allocator_t*); + +extern ROCKSDB_LIBRARY_API rocksdb_cache_t* rocksdb_cache_create_hyper_clock( + size_t capacity, size_t estimated_entry_charge); +extern ROCKSDB_LIBRARY_API rocksdb_cache_t* +rocksdb_cache_create_hyper_clock_opts(rocksdb_hyper_clock_cache_options_t*); + +/* DBPath */ + +extern ROCKSDB_LIBRARY_API rocksdb_dbpath_t* rocksdb_dbpath_create( + const char* path, uint64_t target_size); +extern ROCKSDB_LIBRARY_API void rocksdb_dbpath_destroy(rocksdb_dbpath_t*); + +/* Env */ + +extern ROCKSDB_LIBRARY_API rocksdb_env_t* rocksdb_create_default_env(void); +extern ROCKSDB_LIBRARY_API rocksdb_env_t* rocksdb_create_mem_env(void); +extern ROCKSDB_LIBRARY_API void rocksdb_env_set_background_threads( + rocksdb_env_t* env, int n); +extern ROCKSDB_LIBRARY_API int rocksdb_env_get_background_threads( + rocksdb_env_t* env); +extern ROCKSDB_LIBRARY_API void +rocksdb_env_set_high_priority_background_threads(rocksdb_env_t* env, int n); +extern ROCKSDB_LIBRARY_API int rocksdb_env_get_high_priority_background_threads( + rocksdb_env_t* env); +extern ROCKSDB_LIBRARY_API void rocksdb_env_set_low_priority_background_threads( + rocksdb_env_t* env, int n); +extern ROCKSDB_LIBRARY_API int rocksdb_env_get_low_priority_background_threads( + rocksdb_env_t* env); +extern ROCKSDB_LIBRARY_API void +rocksdb_env_set_bottom_priority_background_threads(rocksdb_env_t* env, int n); +extern ROCKSDB_LIBRARY_API int +rocksdb_env_get_bottom_priority_background_threads(rocksdb_env_t* env); +extern ROCKSDB_LIBRARY_API void rocksdb_env_join_all_threads( + rocksdb_env_t* env); +extern ROCKSDB_LIBRARY_API void rocksdb_env_lower_thread_pool_io_priority( + rocksdb_env_t* env); +extern ROCKSDB_LIBRARY_API void +rocksdb_env_lower_high_priority_thread_pool_io_priority(rocksdb_env_t* env); +extern ROCKSDB_LIBRARY_API void rocksdb_env_lower_thread_pool_cpu_priority( + rocksdb_env_t* env); +extern ROCKSDB_LIBRARY_API void +rocksdb_env_lower_high_priority_thread_pool_cpu_priority(rocksdb_env_t* env); + +extern ROCKSDB_LIBRARY_API void rocksdb_env_destroy(rocksdb_env_t*); + +extern ROCKSDB_LIBRARY_API rocksdb_envoptions_t* 
rocksdb_envoptions_create( + void); +extern ROCKSDB_LIBRARY_API void rocksdb_envoptions_destroy( + rocksdb_envoptions_t* opt); +extern ROCKSDB_LIBRARY_API void rocksdb_create_dir_if_missing( + rocksdb_env_t* env, const char* path, char** errptr); + +/* SstFile */ + +extern ROCKSDB_LIBRARY_API rocksdb_sstfilewriter_t* +rocksdb_sstfilewriter_create(const rocksdb_envoptions_t* env, + const rocksdb_options_t* io_options); +extern ROCKSDB_LIBRARY_API rocksdb_sstfilewriter_t* +rocksdb_sstfilewriter_create_with_comparator( + const rocksdb_envoptions_t* env, const rocksdb_options_t* io_options, + const rocksdb_comparator_t* comparator); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_open( + rocksdb_sstfilewriter_t* writer, const char* name, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_add( + rocksdb_sstfilewriter_t* writer, const char* key, size_t keylen, + const char* val, size_t vallen, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_put( + rocksdb_sstfilewriter_t* writer, const char* key, size_t keylen, + const char* val, size_t vallen, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_put_with_ts( + rocksdb_sstfilewriter_t* writer, const char* key, size_t keylen, + const char* ts, size_t tslen, const char* val, size_t vallen, + char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_merge( + rocksdb_sstfilewriter_t* writer, const char* key, size_t keylen, + const char* val, size_t vallen, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_delete( + rocksdb_sstfilewriter_t* writer, const char* key, size_t keylen, + char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_delete_with_ts( + rocksdb_sstfilewriter_t* writer, const char* key, size_t keylen, + const char* ts, size_t tslen, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_delete_range( + rocksdb_sstfilewriter_t* writer, const char* begin_key, size_t begin_keylen, + const char* end_key, size_t end_keylen, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_finish( + rocksdb_sstfilewriter_t* writer, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_file_size( + rocksdb_sstfilewriter_t* writer, uint64_t* file_size); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_destroy( + rocksdb_sstfilewriter_t* writer); +extern ROCKSDB_LIBRARY_API rocksdb_ingestexternalfileoptions_t* +rocksdb_ingestexternalfileoptions_create(void); +extern ROCKSDB_LIBRARY_API void +rocksdb_ingestexternalfileoptions_set_move_files( + rocksdb_ingestexternalfileoptions_t* opt, unsigned char move_files); +extern ROCKSDB_LIBRARY_API void +rocksdb_ingestexternalfileoptions_set_snapshot_consistency( + rocksdb_ingestexternalfileoptions_t* opt, + unsigned char snapshot_consistency); +extern ROCKSDB_LIBRARY_API void +rocksdb_ingestexternalfileoptions_set_allow_global_seqno( + rocksdb_ingestexternalfileoptions_t* opt, unsigned char allow_global_seqno); +extern ROCKSDB_LIBRARY_API void +rocksdb_ingestexternalfileoptions_set_allow_blocking_flush( + rocksdb_ingestexternalfileoptions_t* opt, + unsigned char allow_blocking_flush); +extern ROCKSDB_LIBRARY_API void +rocksdb_ingestexternalfileoptions_set_ingest_behind( + rocksdb_ingestexternalfileoptions_t* opt, unsigned char ingest_behind); +extern ROCKSDB_LIBRARY_API void +rocksdb_ingestexternalfileoptions_set_fail_if_not_bottommost_level( + rocksdb_ingestexternalfileoptions_t* opt, + unsigned char fail_if_not_bottommost_level); + +extern 
ROCKSDB_LIBRARY_API void rocksdb_ingestexternalfileoptions_destroy( + rocksdb_ingestexternalfileoptions_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_ingest_external_file( + rocksdb_t* db, const char* const* file_list, const size_t list_len, + const rocksdb_ingestexternalfileoptions_t* opt, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_ingest_external_file_cf( + rocksdb_t* db, rocksdb_column_family_handle_t* handle, + const char* const* file_list, const size_t list_len, + const rocksdb_ingestexternalfileoptions_t* opt, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_try_catch_up_with_primary( + rocksdb_t* db, char** errptr); + +/* SliceTransform */ + +extern ROCKSDB_LIBRARY_API rocksdb_slicetransform_t* +rocksdb_slicetransform_create( + void* state, void (*destructor)(void*), + char* (*transform)(void*, const char* key, size_t length, + size_t* dst_length), + unsigned char (*in_domain)(void*, const char* key, size_t length), + unsigned char (*in_range)(void*, const char* key, size_t length), + const char* (*name)(void*)); +extern ROCKSDB_LIBRARY_API rocksdb_slicetransform_t* + rocksdb_slicetransform_create_fixed_prefix(size_t); +extern ROCKSDB_LIBRARY_API rocksdb_slicetransform_t* +rocksdb_slicetransform_create_noop(void); +extern ROCKSDB_LIBRARY_API void rocksdb_slicetransform_destroy( + rocksdb_slicetransform_t*); + +/* Universal Compaction options */ + +enum { + rocksdb_similar_size_compaction_stop_style = 0, + rocksdb_total_size_compaction_stop_style = 1 +}; + +extern ROCKSDB_LIBRARY_API rocksdb_universal_compaction_options_t* +rocksdb_universal_compaction_options_create(void); +extern ROCKSDB_LIBRARY_API void +rocksdb_universal_compaction_options_set_size_ratio( + rocksdb_universal_compaction_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_universal_compaction_options_get_size_ratio( + rocksdb_universal_compaction_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_universal_compaction_options_set_min_merge_width( + rocksdb_universal_compaction_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_universal_compaction_options_get_min_merge_width( + rocksdb_universal_compaction_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_universal_compaction_options_set_max_merge_width( + rocksdb_universal_compaction_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_universal_compaction_options_get_max_merge_width( + rocksdb_universal_compaction_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_universal_compaction_options_set_max_size_amplification_percent( + rocksdb_universal_compaction_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_universal_compaction_options_get_max_size_amplification_percent( + rocksdb_universal_compaction_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_universal_compaction_options_set_compression_size_percent( + rocksdb_universal_compaction_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_universal_compaction_options_get_compression_size_percent( + rocksdb_universal_compaction_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_universal_compaction_options_set_stop_style( + rocksdb_universal_compaction_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_universal_compaction_options_get_stop_style( + rocksdb_universal_compaction_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_universal_compaction_options_destroy( + rocksdb_universal_compaction_options_t*); + +extern ROCKSDB_LIBRARY_API rocksdb_fifo_compaction_options_t* 
+rocksdb_fifo_compaction_options_create(void); +extern ROCKSDB_LIBRARY_API void +rocksdb_fifo_compaction_options_set_allow_compaction( + rocksdb_fifo_compaction_options_t* fifo_opts, unsigned char allow_compaction); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_fifo_compaction_options_get_allow_compaction( + rocksdb_fifo_compaction_options_t* fifo_opts); +extern ROCKSDB_LIBRARY_API void +rocksdb_fifo_compaction_options_set_max_table_files_size( + rocksdb_fifo_compaction_options_t* fifo_opts, uint64_t size); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_fifo_compaction_options_get_max_table_files_size( + rocksdb_fifo_compaction_options_t* fifo_opts); +extern ROCKSDB_LIBRARY_API void rocksdb_fifo_compaction_options_destroy( + rocksdb_fifo_compaction_options_t* fifo_opts); + +extern ROCKSDB_LIBRARY_API int rocksdb_livefiles_count( + const rocksdb_livefiles_t*); +extern ROCKSDB_LIBRARY_API const char* rocksdb_livefiles_column_family_name( + const rocksdb_livefiles_t*, int index); +extern ROCKSDB_LIBRARY_API const char* rocksdb_livefiles_name( + const rocksdb_livefiles_t*, int index); +extern ROCKSDB_LIBRARY_API int rocksdb_livefiles_level( + const rocksdb_livefiles_t*, int index); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_livefiles_size(const rocksdb_livefiles_t*, int index); +extern ROCKSDB_LIBRARY_API const char* rocksdb_livefiles_smallestkey( + const rocksdb_livefiles_t*, int index, size_t* size); +extern ROCKSDB_LIBRARY_API const char* rocksdb_livefiles_largestkey( + const rocksdb_livefiles_t*, int index, size_t* size); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_livefiles_entries(const rocksdb_livefiles_t*, int index); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_livefiles_deletions(const rocksdb_livefiles_t*, int index); +extern ROCKSDB_LIBRARY_API void rocksdb_livefiles_destroy( + const rocksdb_livefiles_t*); + +/* Utility Helpers */ + +extern ROCKSDB_LIBRARY_API void rocksdb_get_options_from_string( + const rocksdb_options_t* base_options, const char* opts_str, + rocksdb_options_t* new_options, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_delete_file_in_range( + rocksdb_t* db, const char* start_key, size_t start_key_len, + const char* limit_key, size_t limit_key_len, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_delete_file_in_range_cf( + rocksdb_t* db, rocksdb_column_family_handle_t* column_family, + const char* start_key, size_t start_key_len, const char* limit_key, + size_t limit_key_len, char** errptr); + +/* MetaData */ + +extern ROCKSDB_LIBRARY_API rocksdb_column_family_metadata_t* +rocksdb_get_column_family_metadata(rocksdb_t* db); + +/** + * Returns the rocksdb_column_family_metadata_t of the specified + * column family. + * + * Note that the caller is responsible to release the returned memory + * using rocksdb_column_family_metadata_destroy. 
+ */ +extern ROCKSDB_LIBRARY_API rocksdb_column_family_metadata_t* +rocksdb_get_column_family_metadata_cf( + rocksdb_t* db, rocksdb_column_family_handle_t* column_family); + +extern ROCKSDB_LIBRARY_API void rocksdb_column_family_metadata_destroy( + rocksdb_column_family_metadata_t* cf_meta); + +extern ROCKSDB_LIBRARY_API uint64_t rocksdb_column_family_metadata_get_size( + rocksdb_column_family_metadata_t* cf_meta); + +extern ROCKSDB_LIBRARY_API size_t rocksdb_column_family_metadata_get_file_count( + rocksdb_column_family_metadata_t* cf_meta); + +extern ROCKSDB_LIBRARY_API char* rocksdb_column_family_metadata_get_name( + rocksdb_column_family_metadata_t* cf_meta); + +extern ROCKSDB_LIBRARY_API size_t +rocksdb_column_family_metadata_get_level_count( + rocksdb_column_family_metadata_t* cf_meta); + +/** + * Returns the rocksdb_level_metadata_t of the ith level from the specified + * column family metadata. + * + * If the specified i is greater than or equal to the number of levels + * in the specified column family, then NULL will be returned. + * + * Note that the caller is responsible to release the returned memory + * using rocksdb_level_metadata_destroy before releasing its parent + * rocksdb_column_family_metadata_t. + */ +extern ROCKSDB_LIBRARY_API rocksdb_level_metadata_t* +rocksdb_column_family_metadata_get_level_metadata( + rocksdb_column_family_metadata_t* cf_meta, size_t i); + +/** + * Releases the specified rocksdb_level_metadata_t. + * + * Note that the specified rocksdb_level_metadata_t must be released + * before the release of its parent rocksdb_column_family_metadata_t. + */ +extern ROCKSDB_LIBRARY_API void rocksdb_level_metadata_destroy( + rocksdb_level_metadata_t* level_meta); + +extern ROCKSDB_LIBRARY_API int rocksdb_level_metadata_get_level( + rocksdb_level_metadata_t* level_meta); + +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_level_metadata_get_size(rocksdb_level_metadata_t* level_meta); + +extern ROCKSDB_LIBRARY_API size_t +rocksdb_level_metadata_get_file_count(rocksdb_level_metadata_t* level_meta); + +/** + * Returns the sst_file_metadata_t of the ith file from the specified level + * metadata. + * + * If the specified i is greater than or equal to the number of files + * in the specified level, then NULL will be returned. + * + * Note that the caller is responsible to release the returned memory + * using rocksdb_sst_file_metadata_destroy before releasing its + * parent rocksdb_level_metadata_t. + */ +extern ROCKSDB_LIBRARY_API rocksdb_sst_file_metadata_t* +rocksdb_level_metadata_get_sst_file_metadata( + rocksdb_level_metadata_t* level_meta, size_t i); + +/** + * Releases the specified rocksdb_sst_file_metadata_t. + * + * Note that the specified rocksdb_sst_file_metadata_t must be released + * before the release of its parent rocksdb_level_metadata_t. + */ +extern ROCKSDB_LIBRARY_API void rocksdb_sst_file_metadata_destroy( + rocksdb_sst_file_metadata_t* file_meta); + +extern ROCKSDB_LIBRARY_API char* +rocksdb_sst_file_metadata_get_relative_filename( + rocksdb_sst_file_metadata_t* file_meta); + +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_sst_file_metadata_get_size(rocksdb_sst_file_metadata_t* file_meta); + +/** + * Returns the smallest key of the specified sst file. + * The caller is responsible for releasing the returned memory. + * + * @param file_meta the metadata of an SST file to obtain its smallest key. + * @param len the out value which will contain the length of the returned key + * after the function call. 
+ */ +extern ROCKSDB_LIBRARY_API char* rocksdb_sst_file_metadata_get_smallestkey( + rocksdb_sst_file_metadata_t* file_meta, size_t* len); + +/** + * Returns the smallest key of the specified sst file. + * The caller is responsible for releasing the returned memory. + * + * @param file_meta the metadata of an SST file to obtain its smallest key. + * @param len the out value which will contain the length of the returned key + * after the function call. + */ +extern ROCKSDB_LIBRARY_API char* rocksdb_sst_file_metadata_get_largestkey( + rocksdb_sst_file_metadata_t* file_meta, size_t* len); + +/* Transactions */ + +extern ROCKSDB_LIBRARY_API rocksdb_column_family_handle_t* +rocksdb_transactiondb_create_column_family( + rocksdb_transactiondb_t* txn_db, + const rocksdb_options_t* column_family_options, + const char* column_family_name, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_transactiondb_t* rocksdb_transactiondb_open( + const rocksdb_options_t* options, + const rocksdb_transactiondb_options_t* txn_db_options, const char* name, + char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_transactiondb_t* +rocksdb_transactiondb_open_column_families( + const rocksdb_options_t* options, + const rocksdb_transactiondb_options_t* txn_db_options, const char* name, + int num_column_families, const char* const* column_family_names, + const rocksdb_options_t* const* column_family_options, + rocksdb_column_family_handle_t** column_family_handles, char** errptr); + +extern ROCKSDB_LIBRARY_API const rocksdb_snapshot_t* +rocksdb_transactiondb_create_snapshot(rocksdb_transactiondb_t* txn_db); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_release_snapshot( + rocksdb_transactiondb_t* txn_db, const rocksdb_snapshot_t* snapshot); + +extern ROCKSDB_LIBRARY_API char* rocksdb_transactiondb_property_value( + rocksdb_transactiondb_t* db, const char* propname); + +extern ROCKSDB_LIBRARY_API int rocksdb_transactiondb_property_int( + rocksdb_transactiondb_t* db, const char* propname, uint64_t* out_val); + +extern ROCKSDB_LIBRARY_API rocksdb_transaction_t* rocksdb_transaction_begin( + rocksdb_transactiondb_t* txn_db, + const rocksdb_writeoptions_t* write_options, + const rocksdb_transaction_options_t* txn_options, + rocksdb_transaction_t* old_txn); + +extern ROCKSDB_LIBRARY_API rocksdb_transaction_t** +rocksdb_transactiondb_get_prepared_transactions(rocksdb_transactiondb_t* txn_db, + size_t* cnt); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_set_name( + rocksdb_transaction_t* txn, const char* name, size_t name_len, + char** errptr); + +extern ROCKSDB_LIBRARY_API char* rocksdb_transaction_get_name( + rocksdb_transaction_t* txn, size_t* name_len); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_prepare( + rocksdb_transaction_t* txn, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_commit( + rocksdb_transaction_t* txn, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_rollback( + rocksdb_transaction_t* txn, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_set_savepoint( + rocksdb_transaction_t* txn); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_rollback_to_savepoint( + rocksdb_transaction_t* txn, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_destroy( + rocksdb_transaction_t* txn); + +extern ROCKSDB_LIBRARY_API rocksdb_writebatch_wi_t* +rocksdb_transaction_get_writebatch_wi(rocksdb_transaction_t* txn); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_rebuild_from_writebatch( + 
rocksdb_transaction_t* txn, rocksdb_writebatch_t* writebatch, + char** errptr); + +// This rocksdb_writebatch_wi_t should be freed with rocksdb_free +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_rebuild_from_writebatch_wi( + rocksdb_transaction_t* txn, rocksdb_writebatch_wi_t* wi, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_set_commit_timestamp( + rocksdb_transaction_t* txn, uint64_t commit_timestamp); + +extern ROCKSDB_LIBRARY_API void +rocksdb_transaction_set_read_timestamp_for_validation( + rocksdb_transaction_t* txn, uint64_t read_timestamp); + +// This snapshot should be freed using rocksdb_free +extern ROCKSDB_LIBRARY_API const rocksdb_snapshot_t* +rocksdb_transaction_get_snapshot(rocksdb_transaction_t* txn); + +extern ROCKSDB_LIBRARY_API char* rocksdb_transaction_get( + rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options, + const char* key, size_t klen, size_t* vlen, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_pinnableslice_t* +rocksdb_transaction_get_pinned(rocksdb_transaction_t* txn, + const rocksdb_readoptions_t* options, + const char* key, size_t klen, char** errptr); + +extern ROCKSDB_LIBRARY_API char* rocksdb_transaction_get_cf( + rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, size_t klen, + size_t* vlen, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_pinnableslice_t* +rocksdb_transaction_get_pinned_cf(rocksdb_transaction_t* txn, + const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, char** errptr); + +extern ROCKSDB_LIBRARY_API char* rocksdb_transaction_get_for_update( + rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options, + const char* key, size_t klen, size_t* vlen, unsigned char exclusive, + char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_pinnableslice_t* +rocksdb_transaction_get_pinned_for_update(rocksdb_transaction_t* txn, + const rocksdb_readoptions_t* options, + const char* key, size_t klen, + unsigned char exclusive, + char** errptr); + +extern ROCKSDB_LIBRARY_API char* rocksdb_transaction_get_for_update_cf( + rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, size_t klen, + size_t* vlen, unsigned char exclusive, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_pinnableslice_t* +rocksdb_transaction_get_pinned_for_update_cf( + rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, size_t klen, + unsigned char exclusive, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_multi_get( + rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options, + size_t num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, char** values_list, + size_t* values_list_sizes, char** errs); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_multi_get_cf( + rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options, + const rocksdb_column_family_handle_t* const* column_families, + size_t num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, char** values_list, + size_t* values_list_sizes, char** errs); + +extern ROCKSDB_LIBRARY_API char* rocksdb_transactiondb_get( + rocksdb_transactiondb_t* txn_db, const rocksdb_readoptions_t* options, + const char* key, size_t klen, size_t* vlen, char** errptr); + +extern ROCKSDB_LIBRARY_API 
rocksdb_pinnableslice_t* +rocksdb_transactiondb_get_pinned(rocksdb_transactiondb_t* txn_db, + const rocksdb_readoptions_t* options, + const char* key, size_t klen, char** errptr); + +extern ROCKSDB_LIBRARY_API char* rocksdb_transactiondb_get_cf( + rocksdb_transactiondb_t* txn_db, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, size_t* vallen, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_pinnableslice_t* +rocksdb_transactiondb_get_pinned_cf( + rocksdb_transactiondb_t* txn_db, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_multi_get( + rocksdb_transactiondb_t* txn_db, const rocksdb_readoptions_t* options, + size_t num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, char** values_list, + size_t* values_list_sizes, char** errs); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_multi_get_cf( + rocksdb_transactiondb_t* txn_db, const rocksdb_readoptions_t* options, + const rocksdb_column_family_handle_t* const* column_families, + size_t num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, char** values_list, + size_t* values_list_sizes, char** errs); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_put( + rocksdb_transaction_t* txn, const char* key, size_t klen, const char* val, + size_t vlen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_put_cf( + rocksdb_transaction_t* txn, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, const char* val, size_t vlen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_put( + rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, + const char* key, size_t klen, const char* val, size_t vlen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_put_cf( + rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, const char* val, size_t vallen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_write( + rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, + rocksdb_writebatch_t* batch, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_merge( + rocksdb_transaction_t* txn, const char* key, size_t klen, const char* val, + size_t vlen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_merge_cf( + rocksdb_transaction_t* txn, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, const char* val, size_t vlen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_merge( + rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, + const char* key, size_t klen, const char* val, size_t vlen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_merge_cf( + rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, size_t klen, + const char* val, size_t vlen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_delete( + rocksdb_transaction_t* txn, const char* key, size_t klen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_delete_cf( + rocksdb_transaction_t* txn, rocksdb_column_family_handle_t* column_family, + 
const char* key, size_t klen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_delete( + rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, + const char* key, size_t klen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_delete_cf( + rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* +rocksdb_transaction_create_iterator(rocksdb_transaction_t* txn, + const rocksdb_readoptions_t* options); + +extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* +rocksdb_transaction_create_iterator_cf( + rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family); + +extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* +rocksdb_transactiondb_create_iterator(rocksdb_transactiondb_t* txn_db, + const rocksdb_readoptions_t* options); + +extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* +rocksdb_transactiondb_create_iterator_cf( + rocksdb_transactiondb_t* txn_db, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_close( + rocksdb_transactiondb_t* txn_db); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_flush( + rocksdb_transactiondb_t* txn_db, const rocksdb_flushoptions_t* options, + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_flush_cf( + rocksdb_transactiondb_t* txn_db, const rocksdb_flushoptions_t* options, + rocksdb_column_family_handle_t* column_family, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_flush_cfs( + rocksdb_transactiondb_t* txn_db, const rocksdb_flushoptions_t* options, + rocksdb_column_family_handle_t** column_families, int num_column_families, + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_flush_wal( + rocksdb_transactiondb_t* txn_db, unsigned char sync, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_checkpoint_t* +rocksdb_transactiondb_checkpoint_object_create(rocksdb_transactiondb_t* txn_db, + char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_optimistictransactiondb_t* +rocksdb_optimistictransactiondb_open(const rocksdb_options_t* options, + const char* name, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_optimistictransactiondb_t* +rocksdb_optimistictransactiondb_open_column_families( + const rocksdb_options_t* options, const char* name, int num_column_families, + const char* const* column_family_names, + const rocksdb_options_t* const* column_family_options, + rocksdb_column_family_handle_t** column_family_handles, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_t* +rocksdb_optimistictransactiondb_get_base_db( + rocksdb_optimistictransactiondb_t* otxn_db); + +extern ROCKSDB_LIBRARY_API void rocksdb_optimistictransactiondb_close_base_db( + rocksdb_t* base_db); + +extern ROCKSDB_LIBRARY_API rocksdb_transaction_t* +rocksdb_optimistictransaction_begin( + rocksdb_optimistictransactiondb_t* otxn_db, + const rocksdb_writeoptions_t* write_options, + const rocksdb_optimistictransaction_options_t* otxn_options, + rocksdb_transaction_t* old_txn); + +extern ROCKSDB_LIBRARY_API void rocksdb_optimistictransactiondb_write( + rocksdb_optimistictransactiondb_t* otxn_db, + const rocksdb_writeoptions_t* options, rocksdb_writebatch_t* batch, + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_optimistictransactiondb_close( 
+ rocksdb_optimistictransactiondb_t* otxn_db); + +extern ROCKSDB_LIBRARY_API rocksdb_checkpoint_t* +rocksdb_optimistictransactiondb_checkpoint_object_create( + rocksdb_optimistictransactiondb_t* otxn_db, char** errptr); + +/* Transaction Options */ + +extern ROCKSDB_LIBRARY_API rocksdb_transactiondb_options_t* +rocksdb_transactiondb_options_create(void); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_options_destroy( + rocksdb_transactiondb_options_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_options_set_max_num_locks( + rocksdb_transactiondb_options_t* opt, int64_t max_num_locks); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_options_set_num_stripes( + rocksdb_transactiondb_options_t* opt, size_t num_stripes); + +extern ROCKSDB_LIBRARY_API void +rocksdb_transactiondb_options_set_transaction_lock_timeout( + rocksdb_transactiondb_options_t* opt, int64_t txn_lock_timeout); + +extern ROCKSDB_LIBRARY_API void +rocksdb_transactiondb_options_set_default_lock_timeout( + rocksdb_transactiondb_options_t* opt, int64_t default_lock_timeout); + +extern ROCKSDB_LIBRARY_API rocksdb_transaction_options_t* +rocksdb_transaction_options_create(void); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_options_destroy( + rocksdb_transaction_options_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_options_set_set_snapshot( + rocksdb_transaction_options_t* opt, unsigned char v); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_options_set_deadlock_detect( + rocksdb_transaction_options_t* opt, unsigned char v); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_options_set_lock_timeout( + rocksdb_transaction_options_t* opt, int64_t lock_timeout); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_options_set_expiration( + rocksdb_transaction_options_t* opt, int64_t expiration); + +extern ROCKSDB_LIBRARY_API void +rocksdb_transaction_options_set_deadlock_detect_depth( + rocksdb_transaction_options_t* opt, int64_t depth); + +extern ROCKSDB_LIBRARY_API void +rocksdb_transaction_options_set_max_write_batch_size( + rocksdb_transaction_options_t* opt, size_t size); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_options_set_skip_prepare( + rocksdb_transaction_options_t* opt, unsigned char v); + +extern ROCKSDB_LIBRARY_API rocksdb_optimistictransaction_options_t* +rocksdb_optimistictransaction_options_create(void); + +extern ROCKSDB_LIBRARY_API void rocksdb_optimistictransaction_options_destroy( + rocksdb_optimistictransaction_options_t* opt); + +extern ROCKSDB_LIBRARY_API void +rocksdb_optimistictransaction_options_set_set_snapshot( + rocksdb_optimistictransaction_options_t* opt, unsigned char v); + +extern ROCKSDB_LIBRARY_API char* rocksdb_optimistictransactiondb_property_value( + rocksdb_optimistictransactiondb_t* db, const char* propname); + +extern ROCKSDB_LIBRARY_API int rocksdb_optimistictransactiondb_property_int( + rocksdb_optimistictransactiondb_t* db, const char* propname, + uint64_t* out_val); + +// referring to convention (3), this should be used by client +// to free memory that was malloc()ed +extern ROCKSDB_LIBRARY_API void rocksdb_free(void* ptr); + +extern ROCKSDB_LIBRARY_API rocksdb_pinnableslice_t* rocksdb_get_pinned( + rocksdb_t* db, const rocksdb_readoptions_t* options, const char* key, + size_t keylen, char** errptr); +extern ROCKSDB_LIBRARY_API rocksdb_pinnableslice_t* rocksdb_get_pinned_cf( + rocksdb_t* db, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + 
size_t keylen, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_pinnableslice_destroy( + rocksdb_pinnableslice_t* v); +extern ROCKSDB_LIBRARY_API const char* rocksdb_pinnableslice_value( + const rocksdb_pinnableslice_t* t, size_t* vlen); + +extern ROCKSDB_LIBRARY_API rocksdb_memory_consumers_t* +rocksdb_memory_consumers_create(void); +extern ROCKSDB_LIBRARY_API void rocksdb_memory_consumers_add_db( + rocksdb_memory_consumers_t* consumers, rocksdb_t* db); +extern ROCKSDB_LIBRARY_API void rocksdb_memory_consumers_add_cache( + rocksdb_memory_consumers_t* consumers, rocksdb_cache_t* cache); +extern ROCKSDB_LIBRARY_API void rocksdb_memory_consumers_destroy( + rocksdb_memory_consumers_t* consumers); +extern ROCKSDB_LIBRARY_API rocksdb_memory_usage_t* +rocksdb_approximate_memory_usage_create(rocksdb_memory_consumers_t* consumers, + char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_approximate_memory_usage_destroy( + rocksdb_memory_usage_t* usage); + +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_approximate_memory_usage_get_mem_table_total( + rocksdb_memory_usage_t* memory_usage); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_approximate_memory_usage_get_mem_table_unflushed( + rocksdb_memory_usage_t* memory_usage); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_approximate_memory_usage_get_mem_table_readers_total( + rocksdb_memory_usage_t* memory_usage); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_approximate_memory_usage_get_cache_total( + rocksdb_memory_usage_t* memory_usage); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_dump_malloc_stats( + rocksdb_options_t*, unsigned char); + +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_memtable_whole_key_filtering(rocksdb_options_t*, + unsigned char); + +extern ROCKSDB_LIBRARY_API void rocksdb_cancel_all_background_work( + rocksdb_t* db, unsigned char wait); + +extern ROCKSDB_LIBRARY_API void rocksdb_disable_manual_compaction( + rocksdb_t* db); + +extern ROCKSDB_LIBRARY_API void rocksdb_enable_manual_compaction(rocksdb_t* db); + +#ifdef __cplusplus +} /* end extern "C" */ +#endif diff --git a/docs/assets/TDengine-logo-trans-small.png b/docs/assets/TDengine-logo-trans-small.png new file mode 100644 index 0000000000000000000000000000000000000000..1cf890facd14839529e76d2ff24f532235aa58ef Binary files /dev/null and b/docs/assets/TDengine-logo-trans-small.png differ diff --git a/docs/assets/TDengine-logo-trans.png b/docs/assets/TDengine-logo-trans.png new file mode 100644 index 0000000000000000000000000000000000000000..85f55ad3b98f1deb472145788311951b2b89af9c Binary files /dev/null and b/docs/assets/TDengine-logo-trans.png differ diff --git a/docs/en/05-get-started/03-package.md b/docs/en/05-get-started/03-package.md index b47855103c6eb4d00405bae60a688e4a0a6fc50c..5a54c32a5137cdfdf25b6b6eca25a265c72c9242 100644 --- a/docs/en/05-get-started/03-package.md +++ b/docs/en/05-get-started/03-package.md @@ -20,6 +20,19 @@ The standard server installation package includes `taos`, `taosd`, `taosAdapter` The TDengine Community Edition is released as Deb and RPM packages. The Deb package can be installed on Debian, Ubuntu, and derivative systems. The RPM package can be installed on CentOS, RHEL, SUSE, and derivative systems. A .tar.gz package is also provided for enterprise customers, and you can install TDengine over `apt-get` as well. The .tar.tz package includes `taosdump` and the TDinsight installation script. If you want to use these utilities with the Deb or RPM package, download and install taosTools separately. 
TDengine can also be installed on x64 Windows and x64/m1 macOS. +## Operating environment requirements + +On Linux systems, the minimum requirements for the operating environment are as follows: + +Linux kernel version: 3.10.0-1160.83.1.el7.x86_64; + +glibc version: 2.17; + +If you compile and install TDengine from the cloned source code, the following are also required: + +cmake version: 3.26.4 or above; + +gcc version: 9.3.1 or above; + ## Installation diff --git a/docs/en/07-develop/08-cache.md b/docs/en/07-develop/08-cache.md index 6a6ca3e5947b6a3233f90e5e0a01417e6be3b19d..9ef5d2c4f0c7697a1bdbb45c84ff28d3531417d8 100644 --- a/docs/en/07-develop/08-cache.md +++ b/docs/en/07-develop/08-cache.md @@ -10,10 +10,10 @@ TDengine uses various kinds of caching techniques to efficiently write and query TDengine uses an insert-driven cache management policy, known as first in, first out (FIFO). This policy differs from read-driven "least recently used (LRU)" cache management. A FIFO policy stores the latest data in cache and flushes the oldest data from cache to disk when the cache usage reaches a threshold. In IoT use cases, the most recent data or the current state is most important. The cache policy in TDengine, like much of the design and architecture of TDengine, is based on the nature of IoT data. -When you create a database, you can configure the size of the write cache on each vnode. The **vgroups** parameter determines the number of vgroups that process data in the database, and the **buffer** parameter determines the size of the write cache for each vnode. +When you create a database, you can configure the size of the write cache on each vnode. The **vgroups** parameter determines the number of vgroups that process data in the database, and the **buffer** parameter determines the size of the write cache for each vnode. The unit of buffer is MB. ```sql -create database db0 vgroups 100 buffer 16MB +create database db0 vgroups 100 buffer 16 ``` In theory, larger cache sizes are always better. However, at a certain point, it becomes impossible to improve performance by increasing cache size. In most scenarios, you can retain the default cache settings. @@ -28,10 +28,10 @@ When you create a database, you can configure whether the latest data from every ## Metadata Cache -To improve query and write performance, each vnode caches the metadata that it receives. When you create a database, you can configure the size of the metadata cache through the *pages* and *pagesize* parameters. +To improve query and write performance, each vnode caches the metadata that it receives. When you create a database, you can configure the size of the metadata cache through the *pages* and *pagesize* parameters. The unit of pagesize is KB. ```sql -create database db0 pages 128 pagesize 16kb +create database db0 pages 128 pagesize 16 ``` The preceding SQL statement creates 128 pages on each vnode in the `db0` database. Each page has a 16 KB metadata cache. diff --git a/docs/en/12-taos-sql/22-meta.md b/docs/en/12-taos-sql/22-meta.md index bd745b1c6f9bbe3f3dfad65fe4664c7a136e722d..4123bdfb586ef09ff99ccc3b4d2d6f7e541765e1 100644 --- a/docs/en/12-taos-sql/22-meta.md +++ b/docs/en/12-taos-sql/22-meta.md @@ -81,7 +81,7 @@ Provides information about user-created databases. Similar to SHOW DATABASES. | 3 | ntables | INT | Number of standard tables and subtables (not including supertables) | | 4 | vgroups | INT | Number of vgroups.
It should be noted that `vnodes` is a TDengine keyword and needs to be escaped with ` when used as a column name. | | 6 | replica | INT | Number of replicas. It should be noted that `replica` is a TDengine keyword and needs to be escaped with ` when used as a column name. | -| 7 | strict | BINARY(3) | Strong consistency. It should be noted that `strict` is a TDengine keyword and needs to be escaped with ` when used as a column name. | +| 7 | strict | BINARY(4) | Obsoleted | | 8 | duration | INT | Duration for storage of single files. It should be noted that `duration` is a TDengine keyword and needs to be escaped with ` when used as a column name. | | 9 | keep | INT | Data retention period. It should be noted that `keep` is a TDengine keyword and needs to be escaped with ` when used as a column name. | | 10 | buffer | INT | Write cache size per vnode, in MB. It should be noted that `buffer` is a TDengine keyword and needs to be escaped with ` when used as a column name. | diff --git a/docs/en/14-reference/03-connector/04-java.mdx b/docs/en/14-reference/03-connector/04-java.mdx index 9c5a852c7003cd8d0a5aebe7849a8988656322bd..ebd2891a9ee444b4c1649385bb94b35b698cc52d 100644 --- a/docs/en/14-reference/03-connector/04-java.mdx +++ b/docs/en/14-reference/03-connector/04-java.mdx @@ -288,6 +288,7 @@ The configuration parameters in the URL are as follows: - httpSocketTimeout: socket timeout in milliseconds, the default value is 5000 ms. It only takes effect when batchfetch is false. - messageWaitTimeout: message transmission timeout in milliseconds, the default value is 3000 ms. It only takes effect when batchfetch is true. - useSSL: connecting Securely Using SSL. true: using SSL connection, false: not using SSL connection. +- httpPoolSize: size of REST concurrent requests. The default value is 20. **Note**: Some configuration items (e.g., locale, timezone) do not work in the REST connection. @@ -355,6 +356,7 @@ The configuration parameters in properties are as follows. - TSDBDriver.HTTP_SOCKET_TIMEOUT: socket timeout in milliseconds, the default value is 5000 ms. It only takes effect when using JDBC REST connection and batchfetch is false. - TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: message transmission timeout in milliseconds, the default value is 3000 ms. It only takes effect when using JDBC REST connection and batchfetch is true. - TSDBDriver.PROPERTY_KEY_USE_SSL: connecting Securely Using SSL. true: using SSL connection, false: not using SSL connection. It only takes effect when using JDBC REST connection. +- TSDBDriver.HTTP_POOL_SIZE: size of REST concurrent requests. The default value is 20. For JDBC native connections, you can specify other parameters, such as log level, SQL length, etc., by specifying URL and Properties. For more detailed configuration, please refer to [Client Configuration](/reference/config/#Client-Only). ### Priority of configuration parameters @@ -419,6 +421,19 @@ while(resultSet.next()){ > The query is consistent with operating a relational database. When using subscripts to get the contents of the returned fields, you have to start from 1. However, we recommend using the field names to get the values of the fields in the result set. +### execute SQL with reqId + +This reqId can be used to request link tracing. 
+ +```java +AbstractStatement aStmt = (AbstractStatement) connection.createStatement(); +aStmt.execute("create database if not exists db", 1L); +aStmt.executeUpdate("use db", 2L); +try (ResultSet rs = aStmt.executeQuery("select * from tb", 3L)) { + Timestamp ts = rs.getTimestamp(1); +} +``` + ### Writing data via parameter binding TDengine has significantly improved the bind APIs to support data writing (INSERT) scenarios. Writing data in this way avoids the resource consumption of SQL syntax parsing, resulting in significant write performance improvements in many cases. @@ -936,6 +951,14 @@ public class SchemalessWsTest { +### Schemaless with reqId + +This reqId can be used to request link tracing. + +```java +writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS, 1L); +``` + ### Data Subscription The TDengine Java Connector supports subscription functionality with the following application API. @@ -993,7 +1016,7 @@ while(true) { #### Assignment subscription Offset -``` +```java long position(TopicPartition partition) throws SQLException; Map position(String topic) throws SQLException; Map beginningOffsets(String topic) throws SQLException; @@ -1002,6 +1025,29 @@ Map endOffsets(String topic) throws SQLException; void seek(TopicPartition partition, long offset) throws SQLException; ``` +Example usage is as follows. + +```java +String topic = "offset_seek_test"; +Map offset = null; +try (TaosConsumer consumer = new TaosConsumer<>(properties)) { + consumer.subscribe(Collections.singletonList(topic)); + for (int i = 0; i < 10; i++) { + if (i == 3) { + // Saving consumption position + offset = consumer.position(topic); + } + if (i == 5) { + // reset consumption to the previously saved position + for (Map.Entry entry : offset.entrySet()) { + consumer.seek(entry.getKey(), entry.getValue()); + } + } + ConsumerRecords records = consumer.poll(Duration.ofMillis(500)); + } +} +``` + #### Close subscriptions ```java @@ -1308,3 +1354,7 @@ For additional troubleshooting, see [FAQ](../../../train-faq/faq). ## API Reference [taos-jdbcdriver doc](https://docs.taosdata.com/api/taos-jdbcdriver) diff --git a/docs/en/14-reference/03-connector/07-python.mdx b/docs/en/14-reference/03-connector/07-python.mdx index aa1186fa52058aa88412546bfa252e43f1a3fcf9..461bdfbf162e696b430c1edb9b09ada70e086fb9 100644 --- a/docs/en/14-reference/03-connector/07-python.mdx +++ b/docs/en/14-reference/03-connector/07-python.mdx @@ -24,6 +24,36 @@ The source code for the Python connector is hosted on [GitHub](https://github.co We recommend using the latest version of `taospy`, regardless of the version of TDengine. +## Handling Exceptions + +The Python connector may raise the following types of exceptions; a minimal handling sketch follows this list. + +- Exceptions raised by the Python connector itself. +- Exceptions from the native library. +- Exceptions from the websocket connection. +- Exceptions from data subscription. +- Exceptions from other TDengine function modules.
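As a minimal illustration only, the sketch below shows one way these exceptions might be caught, assuming the error classes summarized in the table that follows can be imported from `taos.error` (the exact module path may differ between taospy releases) and that a local TDengine server is reachable with the default credentials.

```python
import taos

# Assumption: these classes are exported by taos.error in recent taospy releases;
# adjust the import path if your installed version differs.
from taos.error import ConnectionError, OperationalError, ProgrammingError


def run_query(sql: str):
    conn = None
    try:
        # Hypothetical local connection using the default credentials.
        conn = taos.connect(host="localhost", user="root", password="taosdata")
        result = conn.query(sql)   # returns a TaosResult
        return result.fetch_all()  # a result can only be fetched once
    except ConnectionError as err:
        # Server unreachable or wrong connection parameters.
        print("connection failed:", err)
    except (ProgrammingError, OperationalError) as err:
        # Malformed SQL or an operation rejected by the server.
        print("query failed:", err)
    finally:
        if conn is not None:
            conn.close()


if __name__ == "__main__":
    print(run_query("show databases"))
```

The table below summarizes each error type together with the suggested action.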
+ +|Error Type|Description|Suggested Actions| +|:--------:|:---------:|:---------------:| +|InterfaceError|the native library is too old to support this function|please check the TDengine client version| +|ConnectionError|connection error|please check the TDengine server status and the connection parameters| +|DatabaseError|database error|please upgrade the Python connector to the latest version| +|OperationalError|operation error|| +|ProgrammingError||| +|StatementError|stmt-related exception|| +|ResultError||| +|SchemalessError|schemaless-related exception|| +|TmqError|tmq-related exception|| + +Exceptions are usually handled in Python with try-except. For more on exception handling, please refer to the [Python Errors and Exceptions Documentation](https://docs.python.org/3/tutorial/errors.html). + +All exceptions from the Python Connector are thrown directly. Applications should handle these exceptions. For example: + +```python +{{#include docs/examples/python/handle_exception.py}} +``` + ## Supported features - Native connections support all the core features of TDengine, including connection management, SQL execution, bind interface, subscriptions, and schemaless writing. @@ -343,6 +373,8 @@ For a more detailed description of the `sql()` method, please refer to [RestClie +The `Connection` class contains both an implementation of the PEP249 Connection interface (e.g., the `cursor()` method and the `close()` method) and many extensions (e.g., the `execute()`, `query()`, `schemaless_insert()`, and `subscribe()` methods). + ```python {{#include docs/examples/python/connect_websocket_examples.py:basic}} ``` @@ -353,6 +385,46 @@ For a more detailed description of the `sql()` method, please refer to [RestClie +### Querying Data + + + + +The `query` method of the `TaosConnection` class can be used to query data and return the result data of type `TaosResult`. + +```python +{{#include docs/examples/python/connection_usage_native_reference.py:query}} +``` + +:::tip +The queried results can only be fetched once. For example, only one of `fetch_all()` and `fetch_all_into_dict()` can be used in the example above. Repeated fetches will result in an empty list. +::: + + + + + +The `RestClient` class is a direct wrapper for the [REST API](/reference/rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result. + +```python +{{#include docs/examples/python/rest_client_example.py}} +``` + +For a more detailed description of the `sql()` method, please refer to [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html). + + + + + +The `query` method of the `TaosConnection` class can be used to query data and return the result data of type `TaosResult`. + +```python +{{#include docs/examples/python/connect_websocket_examples.py:basic}} ``` + + + + ### Usage with req_id By using the optional req_id parameter, you can specify a request ID that can be used for tracing. @@ -811,14 +883,6 @@ bind multiple rows at once | ## Other notes -### Exception handling - -All errors from database operations are thrown directly as exceptions and the error message from the database is passed up the exception stack. The application is responsible for exception handling.
For example: - -```python -{{#include docs/examples/python/handle_exception.py}} -``` - ### About nanoseconds Due to the current imperfection of Python's nanosecond support (see link below), the current implementation returns integers at nanosecond precision instead of the `datetime` type produced by `ms` and `us`, which application developers will need to handle on their own. And it is recommended to use pandas' to_datetime(). The Python Connector may modify the interface in the future if Python officially supports nanoseconds in full. diff --git a/docs/en/14-reference/03-connector/index.mdx b/docs/en/14-reference/03-connector/index.mdx index a35d5bc2d1ed4a69f9750a1153d15efe815f674d..41206931181f18063ad1701978a6abe26fc1f5f8 100644 --- a/docs/en/14-reference/03-connector/index.mdx +++ b/docs/en/14-reference/03-connector/index.mdx @@ -48,7 +48,6 @@ Comparing the connector support for TDengine functional features as follows. | **Parameter Binding** | Support | Support | Support | Support | Support | Support | | **Subscription (TMQ)** | Support | Support | Support | Support | Support | Support | | **Schemaless** | Support | Support | Support | Support | Support | Support | -| **DataFrame** | Not Supported | Support | Not Supported | Not Supported | Not Supported | Not Supported | :::info The different database framework specifications for various programming languages do not mean that all C/C++ interfaces need a wrapper. @@ -60,11 +59,10 @@ The different database framework specifications for various programming language | -------------------------------------- | ------------- | --------------- | ------------- | ------------- | ------------- | ------------- | | **Connection Management** | Support | Support | Support | Support | Support | Support | | **Regular Query** | Support | Support | Support | Support | Support | Support | -| **Parameter Binding** | Not Supported | Not Supported | Support | Support | Not Supported | Support | +| **Parameter Binding** | Supported | Not Supported | Support | Support | Not Supported | Support | | **Subscription (TMQ) ** | Supported | Support | Support | Not Supported | Not Supported | Support | -| **Schemaless** | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported | +| **Schemaless** | Supported | Not Supported | Supported | Not Supported | Not Supported | Not Supported | | **Bulk Pulling (based on WebSocket) ** | Support | Support | Support | Support | Support | Support | -| **DataFrame** | Not Supported | Support | Not Supported | Not Supported | Not Supported | Not Supported | :::warning diff --git a/docs/en/14-reference/12-config/index.md b/docs/en/14-reference/12-config/index.md index bfc5aabe7bb13b3fada56e1c28c5b5970f734ed8..cbff7301d2d1658184d0d5614c2b522b7e4ce91c 100755 --- a/docs/en/14-reference/12-config/index.md +++ b/docs/en/14-reference/12-config/index.md @@ -19,14 +19,18 @@ taosd -C ## Configuration File on Client Side -TDengine CLI `taos` is the tool for users to interact with TDengine. It can share same configuration file as `taosd` or use a separate configuration file. When launching `taos`, parameter `-c` can be used to specify the location where its configuration file is. For example `taos -c /home/cfg` means `/home/cfg/taos.cfg` will be used. If `-c` is not used, the default location of the configuration file is `/etc/taos`. For more details please use `taos --help` to get. +TDengine CLI `taos` is the tool for users to interact with TDengine. 
It can share same configuration file as `taosd` or use a separate configuration file. When launching `taos`, parameter `-c` can be used to specify the location where its configuration file is. For example: -```bash -taos -C ``` +taos -c /home/cfg +``` + +means `/home/cfg/taos.cfg` will be used. If `-c` is not used, the default location of the configuration file is `/etc/taos`. For more details please use `taos --help` to get. + +Parameter `-C` can be used on the CLI of `taos` to show its configuration, like below: ```bash -taos --dump-config +taos -C ``` ## Configuration Parameters @@ -77,8 +81,9 @@ The parameters described in this document by the effect that they have on the sy | Default Value | 6030 | :::note -- Ensure that your firewall rules do not block TCP port 6042 on any host in the cluster. Below table describes the ports used by TDengine in details. +Ensure that your firewall rules do not block TCP port 6042 on any host in the cluster. Below table describes the ports used by TDengine in details. ::: + | Protocol | Default Port | Description | How to configure | | :------- | :----------- | :-------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------------- | | TCP | 6030 | Communication between client and server. In a multi-node cluster, communication between nodes. serverPort | @@ -120,6 +125,8 @@ The parameters described in this document by the effect that they have on the sy :::note Please note the `taoskeeper` needs to be installed and running to create the `log` database and receiving metrics sent by `taosd` as the full monitoring solution. +::: + ### monitor | Attribute | Description | diff --git a/docs/en/20-third-party/14-dbeaver.md b/docs/en/20-third-party/14-dbeaver.md new file mode 100644 index 0000000000000000000000000000000000000000..1882e125039c5a242960061e5d00871c721e7651 --- /dev/null +++ b/docs/en/20-third-party/14-dbeaver.md @@ -0,0 +1,61 @@ +--- +sidebar_label: DBeaver +title: DBeaver +description: You can use DBeaver to access your data stored in TDengine and TDengine Cloud. +--- + +[DBeaver](https://dbeaver.io/) is a popular cross-platform database management tool that facilitates data management for developers, database administrators, data analysts, and other users. Starting from version 23.1.1, DBeaver natively supports TDengine and can be used to manage TDengine Cloud as well as TDengine clusters deployed on-premises. + +## Prerequisites + +To use DBeaver to manage TDengine, you need to prepare the following: + +- Install DBeaver. DBeaver supports mainstream operating systems including Windows, macOS, and Linux. Please make sure you download and install the correct version (23.1.1+) and platform package. Please refer to the [official DBeaver documentation](https://github.com/dbeaver/dbeaver/wiki/Installation) for detailed installation steps. +- If you use an on-premises TDengine cluster, please make sure that TDengine and taosAdapter are deployed and running properly. For detailed information, please refer to the taosAdapter User Manual. +- If you use TDengine Cloud, please [register](https://cloud.tdengine.com/) for an account. + +## Usage + +### Use DBeaver to access on-premises TDengine cluster + +1. Start the DBeaver application, click the button or menu item to choose **New Database Connection**, and then select **TDengine** in the **Timeseries** category. 
+ +![Connect TDengine with DBeaver](./dbeaver/dbeaver-connect-tdengine-en.webp) + +2. Configure the TDengine connection by filling in the host address, port number, username, and password. If TDengine is deployed on the local machine, you are only required to fill in the username and password. The default username is root and the default password is taosdata. Click **Test Connection** to check whether the connection is workable. If you do not have the TDengine Java connector installed on the local machine, DBeaver will prompt you to download and install it. + +![Configure the TDengine connection](./dbeaver/dbeaver-config-tdengine-en.webp)) + +3. If the connection is successful, it will be displayed as shown in the following figure. If the connection fails, please check whether the TDengine service and taosAdapter are running correctly and whether the host address, port number, username, and password are correct. + +![Connection successful](./dbeaver/dbeaver-connect-tdengine-test-en.webp) + +4. Use DBeaver to select databases and tables and browse your data stored in TDengine. + +![Browse TDengine data with DBeaver](./dbeaver/dbeaver-browse-data-en.webp) + +5. You can also manipulate TDengine data by executing SQL commands. + +![Use SQL commands to manipulate TDengine data in DBeaver](./dbeaver/dbeaver-sql-execution-en.webp) + +### Use DBeaver to access TDengine Cloud + +1. Log in to the TDengine Cloud service, select **Programming** > **Java** in the management console, and then copy the string value of `TDENGINE_JDBC_URL` displayed in the **Config** section. + +![Copy JDBC URL from TDengine Cloud](./dbeaver/tdengine-cloud-jdbc-dsn-en.webp) + +2. Start the DBeaver application, click the button or menu item to choose **New Database Connection**, and then select **TDengine Cloud** in the **Timeseries** category. + +![Connect TDengine Cloud with DBeaver](./dbeaver/dbeaver-connect-tdengine-cloud-en.webp) + +3. Configure the TDengine Cloud connection by filling in the JDBC URL value. Click **Test Connection**. If you do not have the TDengine Java connector installed on the local machine, DBeaver will prompt you to download and install it. If the connection is successful, it will be displayed as shown in the following figure. If the connection fails, please check whether the TDengine Cloud service is running properly and whether the JDBC URL is correct. + +![Configure the TDengine Cloud connection](./dbeaver/dbeaver-connect-tdengine-cloud-test-en.webp) + +4. Use DBeaver to select databases and tables and browse your data stored in TDengine Cloud. + +![Browse TDengine Cloud data with DBeaver](./dbeaver/dbeaver-browse-data-cloud-en.webp) + +5. You can also manipulate TDengine Cloud data by executing SQL commands. 
+ +![Use SQL commands to manipulate TDengine Cloud data in DBeaver](./dbeaver/dbeaver-sql-execution-cloud-en.webp) diff --git a/docs/en/20-third-party/dbeaver/dbeaver-browse-data-cloud-en.webp b/docs/en/20-third-party/dbeaver/dbeaver-browse-data-cloud-en.webp new file mode 100644 index 0000000000000000000000000000000000000000..84424284119ed688e8a5657e2d1be373fc12485e Binary files /dev/null and b/docs/en/20-third-party/dbeaver/dbeaver-browse-data-cloud-en.webp differ diff --git a/docs/en/20-third-party/dbeaver/dbeaver-browse-data-en.webp b/docs/en/20-third-party/dbeaver/dbeaver-browse-data-en.webp new file mode 100644 index 0000000000000000000000000000000000000000..bb2f05a9a1306794601e9d3a537bbb27e7e88823 Binary files /dev/null and b/docs/en/20-third-party/dbeaver/dbeaver-browse-data-en.webp differ diff --git a/docs/en/20-third-party/dbeaver/dbeaver-config-tdengine-en.webp b/docs/en/20-third-party/dbeaver/dbeaver-config-tdengine-en.webp new file mode 100644 index 0000000000000000000000000000000000000000..ff59e96029854b5a8c89460829dda62db8cc1a0a Binary files /dev/null and b/docs/en/20-third-party/dbeaver/dbeaver-config-tdengine-en.webp differ diff --git a/docs/en/20-third-party/dbeaver/dbeaver-connect-tdengine-cloud-en.webp b/docs/en/20-third-party/dbeaver/dbeaver-connect-tdengine-cloud-en.webp new file mode 100644 index 0000000000000000000000000000000000000000..56b2938c9b9cf44f5f03cc0ea2dbeab5843d9086 Binary files /dev/null and b/docs/en/20-third-party/dbeaver/dbeaver-connect-tdengine-cloud-en.webp differ diff --git a/docs/en/20-third-party/dbeaver/dbeaver-connect-tdengine-cloud-test-en.webp b/docs/en/20-third-party/dbeaver/dbeaver-connect-tdengine-cloud-test-en.webp new file mode 100644 index 0000000000000000000000000000000000000000..c15228c8d34426879f8c7198bf8603b7483fff1d Binary files /dev/null and b/docs/en/20-third-party/dbeaver/dbeaver-connect-tdengine-cloud-test-en.webp differ diff --git a/docs/en/20-third-party/dbeaver/dbeaver-connect-tdengine-en.webp b/docs/en/20-third-party/dbeaver/dbeaver-connect-tdengine-en.webp new file mode 100644 index 0000000000000000000000000000000000000000..9dc8286773e5dc0fa8f2d92763dbbcb4e85eecac Binary files /dev/null and b/docs/en/20-third-party/dbeaver/dbeaver-connect-tdengine-en.webp differ diff --git a/docs/en/20-third-party/dbeaver/dbeaver-connect-tdengine-test-en.webp b/docs/en/20-third-party/dbeaver/dbeaver-connect-tdengine-test-en.webp new file mode 100644 index 0000000000000000000000000000000000000000..d22d02e82047d97801a3669fedb22d84191b77ca Binary files /dev/null and b/docs/en/20-third-party/dbeaver/dbeaver-connect-tdengine-test-en.webp differ diff --git a/docs/en/20-third-party/dbeaver/dbeaver-sql-execution-cloud-en.webp b/docs/en/20-third-party/dbeaver/dbeaver-sql-execution-cloud-en.webp new file mode 100644 index 0000000000000000000000000000000000000000..6938c970ac97577798ce7dfe6443f144df54ce9e Binary files /dev/null and b/docs/en/20-third-party/dbeaver/dbeaver-sql-execution-cloud-en.webp differ diff --git a/docs/en/20-third-party/dbeaver/dbeaver-sql-execution-en.webp b/docs/en/20-third-party/dbeaver/dbeaver-sql-execution-en.webp new file mode 100644 index 0000000000000000000000000000000000000000..1f3fc19571f242db918da525ed7da9120847cab3 Binary files /dev/null and b/docs/en/20-third-party/dbeaver/dbeaver-sql-execution-en.webp differ diff --git a/docs/en/20-third-party/dbeaver/tdengine-cloud-jdbc-dsn-en.webp b/docs/en/20-third-party/dbeaver/tdengine-cloud-jdbc-dsn-en.webp new file mode 100644 index 
0000000000000000000000000000000000000000..7a8ed8f6d3ab1de3051137c7880f641640746c2f Binary files /dev/null and b/docs/en/20-third-party/dbeaver/tdengine-cloud-jdbc-dsn-en.webp differ diff --git a/docs/en/27-train-faq/01-faq.md b/docs/en/27-train-faq/01-faq.md index aa28303f5d10e69a8446a2511d288c67ecc8ac02..715704a0c322c7ec22926e95896cafef5677aa1a 100644 --- a/docs/en/27-train-faq/01-faq.md +++ b/docs/en/27-train-faq/01-faq.md @@ -56,7 +56,7 @@ This error indicates that the client could not connect to the server. Perform th 7. If you are using the Python, Java, Go, Rust, C#, or Node.js connector on Linux to connect to the server, verify that `libtaos.so` is in the `/usr/local/taos/driver` directory and `/usr/local/taos/driver` is in the `LD_LIBRARY_PATH` environment variable. -8. If you are using macOS, verify that `libtaos.dylib` is in the `/usr/local/lib` directory and `/usr/local/lib` is in the `LD_LIBRARY_PATH` environment variable.. +8. If you are using macOS, verify that `libtaos.dylib` is in the `/usr/local/lib` directory and `/usr/local/lib` is in the `DYLD_LIBRARY_PATH` environment variable.. 9. If you are using Windows, verify that `C:\TDengine\driver\taos.dll` is in the `PATH` environment variable. If possible, move `taos.dll` to the `C:\Windows\System32` directory. diff --git a/docs/zh/05-get-started/03-package.md b/docs/zh/05-get-started/03-package.md index bab6377c7e1781c614e240b60f52d0c0481e65e7..f6d1c85a60ba5bbd08b122266ca42815a58d094c 100644 --- a/docs/zh/05-get-started/03-package.md +++ b/docs/zh/05-get-started/03-package.md @@ -16,6 +16,20 @@ TDengine 完整的软件包包括服务端(taosd)、应用驱动(taosc) 在 Linux 系统上,TDengine 社区版提供 Deb 和 RPM 格式安装包,用户可以根据自己的运行环境选择合适的安装包。其中 Deb 支持 Debian/Ubuntu 及其衍生系统,RPM 支持 CentOS/RHEL/SUSE 及其衍生系统。同时我们也为企业用户提供 tar.gz 格式安装包,也支持通过 `apt-get` 工具从线上进行安装。需要注意的是,RPM 和 Deb 包不含 `taosdump` 和 TDinsight 安装脚本,这些工具需要通过安装 taosTools 包获得。TDengine 也提供 Windows x64 平台和 macOS x64/m1 平台的安装包。 +## 运行环境要求 +在linux系统中,运行环境最低要求如下: + +linux 内核版本 - 3.10.0-1160.83.1.el7.x86_64; + +glibc 版本 - 2.17; + +如果通过clone源码进行编译安装,还需要满足: + +cmake版本 - 3.26.4或以上; + +gcc 版本 - 9.3.1或以上; + + ## 安装 diff --git a/docs/zh/07-develop/08-cache.md b/docs/zh/07-develop/08-cache.md index 29e28e3dde0816d9e5a08f74abd2382854d336da..07479b9a27e12745101898e9a8e046b5f169adc1 100644 --- a/docs/zh/07-develop/08-cache.md +++ b/docs/zh/07-develop/08-cache.md @@ -10,10 +10,10 @@ description: "TDengine 内部的缓存设计" TDengine 采用时间驱动缓存管理策略(First-In-First-Out,FIFO),又称为写驱动的缓存管理机制。这种策略有别于读驱动的数据缓存模式(Least-Recent-Used,LRU),直接将最近写入的数据保存在系统的缓存中。当缓存达到临界值的时候,将最早的数据批量写入磁盘。一般意义上来说,对于物联网数据的使用,用户最为关心最近产生的数据,即当前状态。TDengine 充分利用了这一特性,将最近到达的(当前状态)数据保存在缓存中。 -每个 vnode 的写入缓存大小在创建数据库时决定,创建数据库时的两个关键参数 vgroups 和 buffer 分别决定了该数据库中的数据由多少个 vgroup 处理,以及向其中的每个 vnode 分配多少写入缓存。 +每个 vnode 的写入缓存大小在创建数据库时决定,创建数据库时的两个关键参数 vgroups 和 buffer 分别决定了该数据库中的数据由多少个 vgroup 处理,以及向其中的每个 vnode 分配多少写入缓存。buffer 的单位是MB。 ```sql -create database db0 vgroups 100 buffer 16MB +create database db0 vgroups 100 buffer 16 ``` 理论上缓存越大越好,但超过一定阈值后再增加缓存对写入性能提升并无帮助,一般情况下使用默认值即可。 @@ -28,10 +28,10 @@ create database db0 vgroups 100 buffer 16MB ## 元数据缓存 -为了更高效地处理查询和写入,每个 vnode 都会缓存自己曾经获取到的元数据。元数据缓存由创建数据库时的两个参数 pages 和 pagesize 决定。 +为了更高效地处理查询和写入,每个 vnode 都会缓存自己曾经获取到的元数据。元数据缓存由创建数据库时的两个参数 pages 和 pagesize 决定。pagesize 的单位是 kb。 ```sql -create database db0 pages 128 pagesize 16kb +create database db0 pages 128 pagesize 16 ``` 上述语句会为数据库 db0 的每个 vnode 创建 128 个 page,每个 page 16kb 的元数据缓存。 diff --git a/docs/zh/08-connector/14-java.mdx b/docs/zh/08-connector/14-java.mdx index 
1588159b570a04659735f085ecdf7c8822c0e43c..27b732b8835c2290c6cc1e55c35cb6e69f3b957d 100644 --- a/docs/zh/08-connector/14-java.mdx +++ b/docs/zh/08-connector/14-java.mdx @@ -291,6 +291,7 @@ url 中的配置参数如下: - httpSocketTimeout: socket 超时时间,单位 ms,默认值为 5000。仅在 batchfetch 设置为 false 时生效。 - messageWaitTimeout: 消息超时时间, 单位 ms, 默认值为 3000。 仅在 batchfetch 设置为 true 时生效。 - useSSL: 连接中是否使用 SSL。 +- httpPoolSize: REST 并发请求大小,默认 20。 **注意**:部分配置项(比如:locale、timezone)在 REST 连接中不生效。 @@ -358,6 +359,7 @@ properties 中的配置参数如下: - TSDBDriver.HTTP_SOCKET_TIMEOUT: socket 超时时间,单位 ms,默认值为 5000。仅在 REST 连接且 batchfetch 设置为 false 时生效。 - TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: 消息超时时间, 单位 ms, 默认值为 3000。 仅在 REST 连接且 batchfetch 设置为 true 时生效。 - TSDBDriver.PROPERTY_KEY_USE_SSL: 连接中是否使用 SSL。仅在 REST 连接时生效。 +- TSDBDriver.HTTP_POOL_SIZE: REST 并发请求大小,默认 20。 此外对 JDBC 原生连接,通过指定 URL 和 Properties 还可以指定其他参数,比如日志级别、SQL 长度等。更多详细配置请参考[客户端配置](/reference/config/#仅客户端适用)。 ### 配置参数的优先级 @@ -422,6 +424,19 @@ while(resultSet.next()){ > 查询和操作关系型数据库一致,使用下标获取返回字段内容时从 1 开始,建议使用字段名称获取。 +### 执行带有 reqId 的 SQL + +此 reqId 可用于请求链路追踪。 + +```java +AbstractStatement aStmt = (AbstractStatement) connection.createStatement(); +aStmt.execute("create database if not exists db", 1L); +aStmt.executeUpdate("use db", 2L); +try (ResultSet rs = aStmt.executeQuery("select * from tb", 3L)) { + Timestamp ts = rs.getTimestamp(1); +} +``` + ### 通过参数绑定写入数据 TDengine 的 JDBC 原生连接实现大幅改进了参数绑定方式对数据写入(INSERT)场景的支持。采用这种方式写入数据时,能避免 SQL 语法解析的资源消耗,从而在很多情况下显著提升写入性能。 @@ -939,6 +954,14 @@ public class SchemalessWsTest { +### 执行带有 reqId 的无模式写入 + +此 reqId 可用于请求链路追踪。 + +```java +writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS, 1L); +``` + ### 数据订阅 TDengine Java 连接器支持订阅功能,应用 API 如下: @@ -996,7 +1019,7 @@ while(true) { #### 指定订阅 Offset -``` +```java long position(TopicPartition partition) throws SQLException; Map position(String topic) throws SQLException; Map beginningOffsets(String topic) throws SQLException; @@ -1005,6 +1028,29 @@ Map endOffsets(String topic) throws SQLException; void seek(TopicPartition partition, long offset) throws SQLException; ``` +示例代码: + +```java +String topic = "offset_seek_test"; +Map offset = null; +try (TaosConsumer consumer = new TaosConsumer<>(properties)) { + consumer.subscribe(Collections.singletonList(topic)); + for (int i = 0; i < 10; i++) { + if (i == 3) { + // Saving consumption position + offset = consumer.position(topic); + } + if (i == 5) { + // reset consumption to the previously saved position + for (Map.Entry entry : offset.entrySet()) { + consumer.seek(entry.getKey(), entry.getValue()); + } + } + ConsumerRecords records = consumer.poll(Duration.ofMillis(500)); + } +} +``` + #### 关闭订阅 ```java diff --git a/docs/zh/08-connector/30-python.mdx b/docs/zh/08-connector/30-python.mdx index f4f1aad63b846aff51fbbba504e5c0e24479315f..8752dc214565c7834cdc6903f5247cd4c64194a2 100644 --- a/docs/zh/08-connector/30-python.mdx +++ b/docs/zh/08-connector/30-python.mdx @@ -25,6 +25,36 @@ Python 连接器的源码托管在 [GitHub](https://github.com/taosdata/taos-con 无论使用什么版本的 TDengine 都建议使用最新版本的 `taospy`。 +## 处理异常 + +Python 连接器可能会产生 4 种异常: + +- Python 连接器本身的异常 +- 原生连接方式的异常 +- websocket 连接方式异常 +- 数据订阅异常 +- TDengine 其他功能模块的异常 + +|Error Type|Description|Suggested Actions| +|:--------:|:---------:|:---------------:| +|InterfaceError|taosc 版本太低,不支持所使用的接口|请检查 TDengine 客户端版本| +|ConnectionError|数据库链接错误|请检查 TDengine 服务端状态和连接参数| +|DatabaseError|数据库错误|请检查 TDengine 服务端版本,并将 Python 连接器升级到最新版| +|OperationalError|操作错误|API 使用错误,请检查代码| +|ProgrammingError||| 
+|StatementError|stmt 相关异常|| +|ResultError||| +|SchemalessError|schemaless 相关异常|| +|TmqError|tmq 相关异常|| + +Python 中通常通过 try-expect 处理异常,异常处理相关请参考 [Python 错误和异常文档](https://docs.python.org/3/tutorial/errors.html)。 + +Python Connector 的所有数据库操作如果出现异常,都会直接抛出来。由应用程序负责异常处理。比如: + +```python +{{#include docs/examples/python/handle_exception.py}} +``` + ## 支持的功能 - 原生连接支持 TDengine 的所有核心功能, 包括: 连接管理、执行 SQL、参数绑定、订阅、无模式写入(schemaless)。 @@ -32,7 +62,7 @@ Python 连接器的源码托管在 [GitHub](https://github.com/taosdata/taos-con ## 安装 -### 准备 +### 安装前准备 1. 安装 Python。新近版本 taospy 包要求 Python 3.6.2+。早期版本 taospy 包要求 Python 3.7+。taos-ws-py 包要求 Python 3.7+。如果系统上还没有 Python 可参考 [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download) 安装。 2. 安装 [pip](https://pypi.org/project/pip/)。大部分情况下 Python 的安装包都自带了 pip 工具, 如果没有请参考 [pip documentation](https://pip.pypa.io/en/stable/installation/) 安装。 @@ -274,7 +304,7 @@ Transfer-Encoding: chunked -## 示例程序 +## 使用示例 ### 基本使用 @@ -343,6 +373,10 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线 +#### Connection 类的使用 + +`Connection` 类既包含对 PEP249 Connection 接口的实现(如:cursor方法和 close 方法),也包含很多扩展功能(如: execute、 query、schemaless_insert 和 subscribe 方法。 + ```python {{#include docs/examples/python/connect_websocket_examples.py:basic}} ``` @@ -353,6 +387,46 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线 +### 查询数据 + + + + +`TaosConnection` 类的 `query` 方法可以用来查询数据,返回 `TaosResult` 类型的结果数据。 + +```python +{{#include docs/examples/python/connection_usage_native_reference.py:query}} +``` + +:::tip +查询结果只能获取一次。比如上面的示例中 `fetch_all()` 和 `fetch_all_into_dict()` 只能用一个。重复获取得到的结果为空列表。 +::: + + + + + +RestClient 类是对于 REST API 的直接封装。它只包含一个 sql() 方法用于执行任意 SQL 语句, 并返回执行结果。 + +```python +{{#include docs/examples/python/rest_client_example.py}} +``` + +对于 `sql()` 方法更详细的介绍, 请参考 [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html)。 + + + + + +`TaosConnection` 类的 `query` 方法可以用来查询数据,返回 `TaosResult` 类型的结果数据。 + +```python +{{#include docs/examples/python/connect_websocket_examples.py:basic}} +``` + + + + ### 与 req_id 一起使用 使用可选的 req_id 参数,指定请求 id,可以用于 tracing @@ -807,7 +881,7 @@ stmt.close() -### 其它示例程序 +### 更多示例程序 | 示例程序链接 | 示例程序内容 | | ------------------------------------------------------------------------------------------------------------- | ----------------------- | @@ -819,14 +893,6 @@ stmt.close() ## 其它说明 -### 异常处理 - -所有数据库操作如果出现异常,都会直接抛出来。由应用程序负责异常处理。比如: - -```python -{{#include docs/examples/python/handle_exception.py}} -``` -`` ### 关于纳秒 (nanosecond) 由于目前 Python 对 nanosecond 支持的不完善(见下面的链接),目前的实现方式是在 nanosecond 精度时返回整数,而不是 ms 和 us 返回的 datetime 类型,应用开发者需要自行处理,建议使用 pandas 的 to_datetime()。未来如果 Python 正式完整支持了纳秒,Python 连接器可能会修改相关接口。 diff --git a/docs/zh/08-connector/index.md b/docs/zh/08-connector/index.md index d9c1a07d3c9067340df1cd46e9736be8347a9f0a..92bc8ed0ce81f27ebf3336669e7b60834581a559 100644 --- a/docs/zh/08-connector/index.md +++ b/docs/zh/08-connector/index.md @@ -45,9 +45,8 @@ TDengine 版本更新往往会增加新的功能特性,列表中的连接器 | **连接管理** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 | | **普通查询** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 | | **参数绑定** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 | -| **数据订阅(TMQ)** | 暂不支持 | 支持 | 支持 | 支持 | 支持 | 支持 | +| **数据订阅(TMQ)** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 | | **Schemaless** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 | -| **DataFrame** | 不支持 | 支持 | 不支持 | 不支持 | 不支持 | 不支持 | :::info 由于不同编程语言数据库框架规范不同,并不意味着所有 C/C++ 接口都需要对应封装支持。 @@ -59,11 +58,10 @@ TDengine 版本更新往往会增加新的功能特性,列表中的连接器 | ------------------------------ | -------- | ---------- | -------- | -------- | ----------- | -------- | | **连接管理** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 | | **普通查询** | 
支持 | 支持 | 支持 | 支持 | 支持 | 支持 | -| **参数绑定** | 暂不支持 | 暂不支持 | 支持 | 支持 | 暂不支持 | 支持 | +| **参数绑定** | 支持 | 暂不支持 | 支持 | 支持 | 暂不支持 | 支持 | | **数据订阅(TMQ)** | 支持 | 支持 | 支持 | 暂不支持 | 暂不支持 | 支持 | -| **Schemaless** | 支持 | 暂不支持 | 暂不支持 | 暂不支持 | 暂不支持 | 暂不支持 | +| **Schemaless** | 支持 | 暂不支持 | 支持 | 暂不支持 | 暂不支持 | 支持 | | **批量拉取(基于 WebSocket)** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 | -| **DataFrame** | 不支持 | 支持 | 不支持 | 不支持 | 不支持 | 不支持 | :::warning diff --git a/docs/zh/12-taos-sql/22-meta.md b/docs/zh/12-taos-sql/22-meta.md index 7fb60b85a7db75e2df2e7612862963f3e847e4e5..3fffbd07061d4a4868028516ba80e49c54129ba1 100644 --- a/docs/zh/12-taos-sql/22-meta.md +++ b/docs/zh/12-taos-sql/22-meta.md @@ -81,7 +81,7 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数 | 3 | ntables | INT | 数据库中表的数量,包含子表和普通表但不包含超级表 | | 4 | vgroups | INT | 数据库中有多少个 vgroup。需要注意,`vgroups` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | | 6 | replica | INT | 副本数。需要注意,`replica` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | -| 7 | strict | BINARY(3) | 强一致性。需要注意,`strict` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | +| 7 | strict | BINARY(4) | 废弃参数 | | 8 | duration | INT | 单文件存储数据的时间跨度。需要注意,`duration` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | | 9 | keep | INT | 数据保留时长。需要注意,`keep` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | | 10 | buffer | INT | 每个 vnode 写缓存的内存块大小,单位 MB。需要注意,`buffer` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | diff --git a/docs/zh/14-reference/12-config/index.md b/docs/zh/14-reference/12-config/index.md index 51748b68c4629baaa94fbafbb9f3f7c462daa7c7..a637b52bf852f4126d7711c206cd402945be0d78 100755 --- a/docs/zh/14-reference/12-config/index.md +++ b/docs/zh/14-reference/12-config/index.md @@ -19,16 +19,20 @@ taosd -C ## 为客户端指定配置文件 -TDengine 系统的前台交互客户端应用程序为 taos,以及应用驱动,它可以与 taosd 共享同一个配置文件 taos.cfg,也可以使用单独指定配置文件。运行 taos 时,使用参数-c 指定配置文件目录,如 taos -c /home/cfg,表示使用/home/cfg/目录下的 taos.cfg 配置文件中的参数,缺省目录是/etc/taos。更多 taos 的使用方法请见帮助信息 `taos --help`。 +TDengine 系统的前台交互客户端应用程序为 taos,以及应用驱动,它可以与 taosd 共享同一个配置文件 taos.cfg,也可以使用单独指定配置文件。运行 taos 时,使用参数-c 指定配置文件目录,如: -```bash -taos -C ``` +taos -c /home/cfg +``` + +表示使用/home/cfg/目录下的 taos.cfg 配置文件中的参数,缺省目录是/etc/taos。 另外可以使用 `-C` 显示当前服务器配置参数: ```bash -taos --dump-config +taos -C ``` +更多 taos 的使用方法请见帮助信息 `taos --help`。 + ## 配置参数详细列表 :::note @@ -139,6 +143,8 @@ taos --dump-config :::note 请注意,完整的监控功能需要安装并运行 `taoskeeper` 服务。taoskeeper 负责接收监控指标数据并创建 `log` 库。 +::: + ### monitor | 属性 | 说明 | diff --git a/docs/zh/20-third-party/13-dbeaver.md b/docs/zh/20-third-party/13-dbeaver.md new file mode 100644 index 0000000000000000000000000000000000000000..20c8baa7dc1161101b58fff8426a861844c3ed1d --- /dev/null +++ b/docs/zh/20-third-party/13-dbeaver.md @@ -0,0 +1,67 @@ +--- +sidebar_label: DBeaver +title: DBeaver +description: 使用 DBeaver 存取 TDengine 数据的详细指南 +--- + +DBeaver 是一款流行的跨平台数据库管理工具,方便开发者、数据库管理员、数据分析师等用户管理数据。DBeaver 从 23.1.1 版本开始内嵌支持 TDengine。既支持独立部署的 TDengine 集群也支持 TDengine Cloud。 + +## 前置条件 + +### 安装 DBeaver + +使用 DBeaver 管理 TDengine 需要以下几方面的准备工作。 + +- 安装 DBeaver。DBeaver 支持主流操作系统包括 Windows、macOS 和 Linux。请注意[下载](https://dbeaver.io/download/)正确平台和版本(23.1.1+)的安装包。详细安装步骤请参考 [DBeaver 官方文档](https://github.com/dbeaver/dbeaver/wiki/Installation)。 +- 如果使用独立部署的 TDengine 集群,请确认 TDengine 正常运行,并且 taosAdapter 已经安装并正常运行,具体细节请参考 [taosAdapter 的使用手册](/reference/taosadapter)。 +- 如果使用 TDengine Cloud,请[注册](https://cloud.taosdata.com/)相应账号。 + +## 使用步骤 + +### 使用 DBeaver 访问内部部署的 TDengine + +1. 启动 DBeaver 应用,点击按钮或菜单项选择“连接到数据库”,然后在时间序列分类栏中选择 TDengine。 + +![DBeaver 连接 TDengine](./dbeaver/dbeaver-connect-tdengine-zh.webp) + +2. 
配置 TDengine 连接,填入主机地址、端口号、用户名和密码。如果 TDengine 部署在本机,可以只填用户名和密码,默认用户名为 root,默认密码为 taosdata。点击“测试连接”可以对连接是否可用进行测试。如果本机没有安装 TDengine Java + 连接器,DBeaver 会提示下载安装。 + + ![配置 TDengine 连接](./dbeaver/dbeaver-config-tdengine-zh.webp) + +3. 连接成功将显示如下图所示。如果显示连接失败,请检查 TDengine 服务和 taosAdapter 是否正确运行,主机地址、端口号、用户名和密码是否正确。 + +![连接成功](./dbeaver/dbeaver-connect-tdengine-test-zh.webp) + +4. 使用 DBeaver 选择数据库和表可以浏览 TDengine 服务的数据。 + +![DBeaver 浏览 TDengine 数据](./dbeaver/dbeaver-browse-data-zh.webp) + +5. 也可以通过执行 SQL 命令的方式对 TDengine 数据进行操作。 + +![DBeaver SQL 命令](./dbeaver/dbeaver-sql-execution-zh.webp) + +### 使用 DBeaver 访问 TDengine Cloud + +1. 登录 TDengine Cloud 服务,在管理界面中选择“编程”和“Java”,然后复制 TDENGINE_JDBC_URL 的字符串值。 + +![复制 TDengine Cloud DSN](./dbeaver/tdengine-cloud-jdbc-dsn-zh.webp) + +2. 启动 DBeaver 应用,点击按钮或菜单项选择“连接到数据库”,然后在时间序列分类栏中选择 TDengine Cloud。 + +![DBeaver 连接 TDengine Cloud](./dbeaver/dbeaver-connect-tdengine-cloud-zh.webp) + + +3. 配置 TDengine Cloud 连接,填入 JDBC_URL 值。点击“测试连接”,如果本机没有安装 TDengine Java + 连接器,DBeaver 会提示下载安装。连接成功将显示如下图所示。如果显示连接失败,请检查 TDengine Cloud 服务是否启动,JDBC_URL 是否正确。 + + ![配置 TDengine Cloud 连接](./dbeaver/dbeaver-connect-tdengine-cloud-test-zh.webp) + +4. 使用 DBeaver 选择数据库和表可以浏览 TDengine Cloud 服务的数据。 + +![DBeaver 浏览 TDengine Cloud 数据](./dbeaver/dbeaver-browse-cloud-data-zh.webp) + +5. 也可以通过执行 SQL 命令的方式对 TDengine Cloud 数据进行操作。 + +![DBeaver SQL 命令 操作 TDengine Cloud](./dbeaver/dbeaver-sql-execution-cloud-zh.webp) + diff --git a/docs/zh/20-third-party/dbeaver/dbeaver-browse-cloud-data-zh.webp b/docs/zh/20-third-party/dbeaver/dbeaver-browse-cloud-data-zh.webp new file mode 100644 index 0000000000000000000000000000000000000000..28580c9dfbbea61c1d7225ee4e632ee4258207b8 Binary files /dev/null and b/docs/zh/20-third-party/dbeaver/dbeaver-browse-cloud-data-zh.webp differ diff --git a/docs/zh/20-third-party/dbeaver/dbeaver-browse-data-zh.webp b/docs/zh/20-third-party/dbeaver/dbeaver-browse-data-zh.webp new file mode 100644 index 0000000000000000000000000000000000000000..b2faca92b2b6d3fa707c8863de4fb77fdeb49c8f Binary files /dev/null and b/docs/zh/20-third-party/dbeaver/dbeaver-browse-data-zh.webp differ diff --git a/docs/zh/20-third-party/dbeaver/dbeaver-config-tdengine-zh.webp b/docs/zh/20-third-party/dbeaver/dbeaver-config-tdengine-zh.webp new file mode 100644 index 0000000000000000000000000000000000000000..c721cf029b3408928bd5daca4ca0a3fe7ed865f5 Binary files /dev/null and b/docs/zh/20-third-party/dbeaver/dbeaver-config-tdengine-zh.webp differ diff --git a/docs/zh/20-third-party/dbeaver/dbeaver-connect-tdengine-cloud-test-zh.webp b/docs/zh/20-third-party/dbeaver/dbeaver-connect-tdengine-cloud-test-zh.webp new file mode 100644 index 0000000000000000000000000000000000000000..721fe50cfb8aace700e6528e0c5e5e6525429a86 Binary files /dev/null and b/docs/zh/20-third-party/dbeaver/dbeaver-connect-tdengine-cloud-test-zh.webp differ diff --git a/docs/zh/20-third-party/dbeaver/dbeaver-connect-tdengine-cloud-zh.webp b/docs/zh/20-third-party/dbeaver/dbeaver-connect-tdengine-cloud-zh.webp new file mode 100644 index 0000000000000000000000000000000000000000..b6f12a47082664b8cb29fa841add40443595d8d4 Binary files /dev/null and b/docs/zh/20-third-party/dbeaver/dbeaver-connect-tdengine-cloud-zh.webp differ diff --git a/docs/zh/20-third-party/dbeaver/dbeaver-connect-tdengine-test-zh.webp b/docs/zh/20-third-party/dbeaver/dbeaver-connect-tdengine-test-zh.webp new file mode 100644 index 0000000000000000000000000000000000000000..2553ea166ae5d1b08f823e0cf71881cd7da31862 Binary files /dev/null and 
b/docs/zh/20-third-party/dbeaver/dbeaver-connect-tdengine-test-zh.webp differ diff --git a/docs/zh/20-third-party/dbeaver/dbeaver-connect-tdengine-zh.webp b/docs/zh/20-third-party/dbeaver/dbeaver-connect-tdengine-zh.webp new file mode 100644 index 0000000000000000000000000000000000000000..eff8bd9d21163429005961f2b6ec9594271243d8 Binary files /dev/null and b/docs/zh/20-third-party/dbeaver/dbeaver-connect-tdengine-zh.webp differ diff --git a/docs/zh/20-third-party/dbeaver/dbeaver-sql-execution-cloud-zh.webp b/docs/zh/20-third-party/dbeaver/dbeaver-sql-execution-cloud-zh.webp new file mode 100644 index 0000000000000000000000000000000000000000..6efdf48e3e38faea7ed66546fba10c4f1a42460c Binary files /dev/null and b/docs/zh/20-third-party/dbeaver/dbeaver-sql-execution-cloud-zh.webp differ diff --git a/docs/zh/20-third-party/dbeaver/dbeaver-sql-execution-zh.webp b/docs/zh/20-third-party/dbeaver/dbeaver-sql-execution-zh.webp new file mode 100644 index 0000000000000000000000000000000000000000..fa95889d7235ce5101dd37f567681d89aa697041 Binary files /dev/null and b/docs/zh/20-third-party/dbeaver/dbeaver-sql-execution-zh.webp differ diff --git a/docs/zh/20-third-party/dbeaver/tdengine-cloud-jdbc-dsn-zh.webp b/docs/zh/20-third-party/dbeaver/tdengine-cloud-jdbc-dsn-zh.webp new file mode 100644 index 0000000000000000000000000000000000000000..bfa474f3ac5f323e59e35b27616951ce3165f942 Binary files /dev/null and b/docs/zh/20-third-party/dbeaver/tdengine-cloud-jdbc-dsn-zh.webp differ diff --git a/docs/zh/21-tdinternal/01-arch.md b/docs/zh/21-tdinternal/01-arch.md index 6150f2e7579fdd64c562b0c18f9963a6eed20d9d..32d940abc194a77e77524ace594ff79a49bffc7d 100644 --- a/docs/zh/21-tdinternal/01-arch.md +++ b/docs/zh/21-tdinternal/01-arch.md @@ -165,9 +165,7 @@ Vnode 会保持一个数据版本号(version),对内存数据进行持久 ### 同步复制 -对于数据一致性要求更高的场景,异步数据复制提供的最终一致性无法满足要求。因此 TDengine 提供同步复制的机制供用户选择。在创建数据库时,除指定副本数 `replica` 之外,用户还需要指定新的参数 `strict`。如果 `strict` 等于 1,它表示每次 leader 转发给副本时,需要等待半数以上副本达成一致后,才能通知应用,数据在 follower 已经写入成功。如果在一定的时间内,得不到半数以上副本的确认,leader vnode 将返回错误给应用。 - -采用同步复制,系统的性能会有所下降,而且 latency 会增加。因为元数据要强一致,mnode 之间的数据同步缺省就是采用的同步复制。 +对于数据一致性要求更高的场景,异步数据复制提供的最终一致性无法满足要求。因此 TDengine 3.0 使用了同步复制的机制(参照 RAFT 协议的标准实现)。每次 leader vnode 转发给其他副本时,需要等待半数以上(包含自己)副本达成一致后,才能通知应用写入成功。如果在一定的时间内,得不到半数以上副本的确认,leader vnode 将返回错误给应用。 ## 缓存与持久化 diff --git a/include/common/tmsg.h b/include/common/tmsg.h index 929f2f208a4c0be0a6732bcab4e0427f41fba52a..fa092a453c01127edc2875513519f7cc40571e7c 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -3386,7 +3386,6 @@ typedef struct { int8_t reserved; } SMqHbRsp; - #define TD_AUTO_CREATE_TABLE 0x1 typedef struct { int64_t suid; diff --git a/include/libs/function/functionMgt.h b/include/libs/function/functionMgt.h index 55af50e0bc2ea54870cc2d2632ea875508ff4511..402b8f03092789b2ead705f3bd93b1bc235e78bc 100644 --- a/include/libs/function/functionMgt.h +++ b/include/libs/function/functionMgt.h @@ -216,7 +216,7 @@ bool fmIsUserDefinedFunc(int32_t funcId); bool fmIsDistExecFunc(int32_t funcId); bool fmIsForbidFillFunc(int32_t funcId); bool fmIsForbidStreamFunc(int32_t funcId); -bool fmIsForbidSuperTableFunc(int32_t funcId); +bool fmIsForbidSysTableFunc(int32_t funcId); bool fmIsIntervalInterpoFunc(int32_t funcId); bool fmIsInterpFunc(int32_t funcId); bool fmIsLastRowFunc(int32_t funcId); diff --git a/include/libs/nodes/querynodes.h b/include/libs/nodes/querynodes.h index 678c694d9b8ed32a3049962a0b0c6ccd04be5f1b..dc312a762ee9f7b396d6926e685e9e4b46fc15f1 100644 --- a/include/libs/nodes/querynodes.h +++ 
b/include/libs/nodes/querynodes.h @@ -52,6 +52,7 @@ typedef struct SExprNode { SArray* pAssociation; bool orderAlias; bool asAlias; + bool asParam; } SExprNode; typedef enum EColumnType { diff --git a/include/libs/wal/wal.h b/include/libs/wal/wal.h index a4c1e6a64836eca542c513ea1831ccefb3606e1f..b19a0d783d2df9e70b9b21d0b5321ec98df35880 100644 --- a/include/libs/wal/wal.h +++ b/include/libs/wal/wal.h @@ -214,7 +214,6 @@ int32_t walSkipFetchBody(SWalReader *pRead, const SWalCkHead *pHead); void walRefFirstVer(SWal *, SWalRef *); void walRefLastVer(SWal *, SWalRef *); -//void walRefCommitVer(SWal *, SWalRef *); SWalRef *walOpenRef(SWal *); void walCloseRef(SWal *pWal, int64_t refId); diff --git a/include/util/taoserror.h b/include/util/taoserror.h index 889ee41a2918b8b2b43659e43eb8fd25ab3695f1..ce24761df978422a677241e7ed8249ab3356deff 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -704,6 +704,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_PAR_INVALID_TAGS_PC TAOS_DEF_ERROR_CODE(0, 0x2665) #define TSDB_CODE_PAR_INVALID_TIMELINE_QUERY TAOS_DEF_ERROR_CODE(0, 0x2666) #define TSDB_CODE_PAR_INVALID_OPTR_USAGE TAOS_DEF_ERROR_CODE(0, 0x2667) +#define TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED_FUNC TAOS_DEF_ERROR_CODE(0, 0x2668) #define TSDB_CODE_PAR_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x26FF) //planner diff --git a/include/util/tlosertree.h b/include/util/tlosertree.h index 51906443f5ab874a2e7b16a11304ed6890f90437..b3aa37a537fda3aab2241b263b085624d0b6464a 100644 --- a/include/util/tlosertree.h +++ b/include/util/tlosertree.h @@ -43,7 +43,7 @@ typedef struct SMultiwayMergeTreeInfo { int32_t tMergeTreeCreate(SMultiwayMergeTreeInfo **pTree, uint32_t numOfEntries, void *param, __merge_compare_fn_t compareFn); -void tMergeTreeDestroy(SMultiwayMergeTreeInfo *pTree); +void tMergeTreeDestroy(SMultiwayMergeTreeInfo **pTree); void tMergeTreeAdjust(SMultiwayMergeTreeInfo *pTree, int32_t idx); diff --git a/include/util/tlrucache.h b/include/util/tlrucache.h index c9cf71c2fd99398bd3f5e55bd08127f4278ed1f4..e5e59d0876e1f892dbe25fb03674d5612a04f9d7 100644 --- a/include/util/tlrucache.h +++ b/include/util/tlrucache.h @@ -24,7 +24,8 @@ extern "C" { typedef struct SLRUCache SLRUCache; -typedef void (*_taos_lru_deleter_t)(const void *key, size_t keyLen, void *value); +typedef void (*_taos_lru_deleter_t)(const void *key, size_t keyLen, void *value, void *ud); +typedef int (*_taos_lru_functor_t)(const void *key, size_t keyLen, void *value, void *ud); typedef struct LRUHandle LRUHandle; @@ -41,10 +42,11 @@ SLRUCache *taosLRUCacheInit(size_t capacity, int numShardBits, double highPriPoo void taosLRUCacheCleanup(SLRUCache *cache); LRUStatus taosLRUCacheInsert(SLRUCache *cache, const void *key, size_t keyLen, void *value, size_t charge, - _taos_lru_deleter_t deleter, LRUHandle **handle, LRUPriority priority); + _taos_lru_deleter_t deleter, LRUHandle **handle, LRUPriority priority, void *ud); LRUHandle *taosLRUCacheLookup(SLRUCache *cache, const void *key, size_t keyLen); void taosLRUCacheErase(SLRUCache *cache, const void *key, size_t keyLen); +void taosLRUCacheApply(SLRUCache *cache, _taos_lru_functor_t functor, void *ud); void taosLRUCacheEraseUnrefEntries(SLRUCache *cache); bool taosLRUCacheRef(SLRUCache *cache, LRUHandle *handle); diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 2a73156e8a03ffc4e46d159383f8bee1defd30cb..955c90fc81c0129c18691e29c064cccdb3478541 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -1788,6 +1788,7 @@ static 
int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, i len += lenTmp; pStart += lenTmp; + int32_t estimateColLen = 0; for (int32_t j = 0; j < numOfRows; ++j) { if (offset[j] == -1) { continue; @@ -1797,20 +1798,21 @@ static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, i int32_t jsonInnerType = *data; char* jsonInnerData = data + CHAR_BYTES; if (jsonInnerType == TSDB_DATA_TYPE_NULL) { - len += (VARSTR_HEADER_SIZE + strlen(TSDB_DATA_NULL_STR_L)); + estimateColLen += (VARSTR_HEADER_SIZE + strlen(TSDB_DATA_NULL_STR_L)); } else if (tTagIsJson(data)) { - len += (VARSTR_HEADER_SIZE + ((const STag*)(data))->len); + estimateColLen += (VARSTR_HEADER_SIZE + ((const STag*)(data))->len); } else if (jsonInnerType == TSDB_DATA_TYPE_NCHAR) { // value -> "value" - len += varDataTLen(jsonInnerData) + CHAR_BYTES * 2; + estimateColLen += varDataTLen(jsonInnerData) + CHAR_BYTES * 2; } else if (jsonInnerType == TSDB_DATA_TYPE_DOUBLE) { - len += (VARSTR_HEADER_SIZE + 32); + estimateColLen += (VARSTR_HEADER_SIZE + 32); } else if (jsonInnerType == TSDB_DATA_TYPE_BOOL) { - len += (VARSTR_HEADER_SIZE + 5); + estimateColLen += (VARSTR_HEADER_SIZE + 5); } else { tscError("estimateJsonLen error: invalid type:%d", jsonInnerType); return -1; } } + len += TMAX(colLen, estimateColLen); } else if (IS_VAR_DATA_TYPE(pResultInfo->fields[i].type)) { int32_t lenTmp = numOfRows * sizeof(int32_t); len += (lenTmp + colLen); diff --git a/source/client/src/clientSmlLine.c b/source/client/src/clientSmlLine.c index 40e014458e815a68de4dccac310ee1ac9442bcaf..c5832ce1ce526021696cdbd0d850fa93515f9add 100644 --- a/source/client/src/clientSmlLine.c +++ b/source/client/src/clientSmlLine.c @@ -202,7 +202,7 @@ static int32_t smlParseTagKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLin bool keyEscaped = false; size_t keyLenEscaped = 0; while (*sql < sqlEnd) { - if (unlikely(IS_COMMA(*sql))) { + if (unlikely(IS_SPACE(*sql) || IS_COMMA(*sql))) { smlBuildInvalidDataMsg(&info->msgBuf, "invalid data", *sql); return TSDB_CODE_SML_INVALID_DATA; } @@ -410,7 +410,7 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLin bool keyEscaped = false; size_t keyLenEscaped = 0; while (*sql < sqlEnd) { - if (unlikely(IS_COMMA(*sql))) { + if (unlikely(IS_SPACE(*sql) || IS_COMMA(*sql))) { smlBuildInvalidDataMsg(&info->msgBuf, "invalid data", *sql); return TSDB_CODE_SML_INVALID_DATA; } @@ -436,19 +436,20 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLin size_t valueLen = 0; bool valueEscaped = false; size_t valueLenEscaped = 0; - bool isInQuote = false; + int quoteNum = 0; const char *escapeChar = NULL; while (*sql < sqlEnd) { // parse value if (unlikely(*(*sql) == QUOTE && (*(*sql - 1) != SLASH || (*sql - 1) == escapeChar))) { - isInQuote = !isInQuote; + quoteNum++; (*sql)++; - continue; - } - if (!isInQuote) { - if (unlikely(IS_SPACE(*sql) || IS_COMMA(*sql))) { + if(quoteNum > 2){ break; } + continue; + } + if (quoteNum % 2 == 0 && (unlikely(IS_SPACE(*sql) || IS_COMMA(*sql)))) { + break; } if (IS_SLASH_LETTER_IN_FIELD_VALUE(*sql) && (*sql - 1) != escapeChar) { escapeChar = *sql; @@ -460,8 +461,8 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLin } valueLen = *sql - value; - if (unlikely(isInQuote)) { - smlBuildInvalidDataMsg(&info->msgBuf, "only one quote", value); + if (unlikely(quoteNum != 0 && quoteNum != 2)) { + smlBuildInvalidDataMsg(&info->msgBuf, "unbalanced quotes", value); return TSDB_CODE_SML_INVALID_DATA; } if 
(unlikely(valueLen == 0)) { diff --git a/source/client/test/smlTest.cpp b/source/client/test/smlTest.cpp index 1578b8b607f8ea19b4ee3efb7573c054b6c379fc..054698fff0bdc542ac31847e2bb70beb4b3e573f 100644 --- a/source/client/test/smlTest.cpp +++ b/source/client/test/smlTest.cpp @@ -224,6 +224,8 @@ TEST(testCase, smlParseCols_Error_Test) { "st,tt=aa c 1=2 1626006833639000000,", //field value double quote,slash "st,tt=aa c=\"a\"a\" 1626006833639000000,", + "escape_test,tag1=\"tag1_value\",tag2=\"tag2_value\" co l0=\"col0_value\",col1=\"col1_value\" 1680918783010000000", + "escape_test,tag1=\"tag1_value\",tag2=\"tag2_value\" col0=\"co\"l\"0_value\",col1=\"col1_value\" 1680918783010000000" }; SSmlHandle *info = smlBuildSmlInfo(NULL); diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 0104384bd3cc89282f34ff3fa7409bc5f155f084..5d1288d831c5c68ef52140d062111347684213e1 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -1713,7 +1713,8 @@ static int32_t colDataMoveVarData(SColumnInfoData* pColInfoData, size_t start, s static void colDataTrimFirstNRows(SColumnInfoData* pColInfoData, size_t n, size_t total) { if (IS_VAR_DATA_TYPE(pColInfoData->info.type)) { - pColInfoData->varmeta.length = colDataMoveVarData(pColInfoData, n, total); + // pColInfoData->varmeta.length = colDataMoveVarData(pColInfoData, n, total); + memmove(pColInfoData->varmeta.offset, &pColInfoData->varmeta.offset[n], (total - n) * sizeof(int32_t)); // clear the offset value of the unused entries. memset(&pColInfoData->varmeta.offset[total - n], 0, n); @@ -1745,7 +1746,7 @@ int32_t blockDataTrimFirstRows(SSDataBlock* pBlock, size_t n) { static void colDataKeepFirstNRows(SColumnInfoData* pColInfoData, size_t n, size_t total) { if (IS_VAR_DATA_TYPE(pColInfoData->info.type)) { - pColInfoData->varmeta.length = colDataMoveVarData(pColInfoData, 0, n); + // pColInfoData->varmeta.length = colDataMoveVarData(pColInfoData, 0, n); memset(&pColInfoData->varmeta.offset[n], 0, total - n); } else { // reset the bitmap value /*int32_t stopIndex = BitmapLen(n) * 8; diff --git a/source/dnode/mnode/impl/inc/mndDef.h b/source/dnode/mnode/impl/inc/mndDef.h index 44bd8f74c8d19fa55ea900def0f5435557098d63..8b75795d41cd1924a16cc3c7121bedef7737d69c 100644 --- a/source/dnode/mnode/impl/inc/mndDef.h +++ b/source/dnode/mnode/impl/inc/mndDef.h @@ -468,6 +468,7 @@ typedef struct { int8_t replica; int16_t numOfColumns; int32_t numOfRows; + int32_t curIterPackedRows; void* pIter; SMnode* pMnode; STableMetaRsp* pMeta; @@ -606,25 +607,25 @@ void tDeleteSubscribeObj(SMqSubscribeObj* pSub); int32_t tEncodeSubscribeObj(void** buf, const SMqSubscribeObj* pSub); void* tDecodeSubscribeObj(const void* buf, SMqSubscribeObj* pSub, int8_t sver); -typedef struct { - int32_t epoch; - SArray* consumers; // SArray -} SMqSubActionLogEntry; - -SMqSubActionLogEntry* tCloneSMqSubActionLogEntry(SMqSubActionLogEntry* pEntry); -void tDeleteSMqSubActionLogEntry(SMqSubActionLogEntry* pEntry); -int32_t tEncodeSMqSubActionLogEntry(void** buf, const SMqSubActionLogEntry* pEntry); -void* tDecodeSMqSubActionLogEntry(const void* buf, SMqSubActionLogEntry* pEntry); - -typedef struct { - char key[TSDB_SUBSCRIBE_KEY_LEN]; - SArray* logs; // SArray -} SMqSubActionLogObj; - -SMqSubActionLogObj* tCloneSMqSubActionLogObj(SMqSubActionLogObj* pLog); -void tDeleteSMqSubActionLogObj(SMqSubActionLogObj* pLog); -int32_t tEncodeSMqSubActionLogObj(void** buf, const SMqSubActionLogObj* pLog); -void* tDecodeSMqSubActionLogObj(const void* buf, 
SMqSubActionLogObj* pLog); +//typedef struct { +// int32_t epoch; +// SArray* consumers; // SArray +//} SMqSubActionLogEntry; + +//SMqSubActionLogEntry* tCloneSMqSubActionLogEntry(SMqSubActionLogEntry* pEntry); +//void tDeleteSMqSubActionLogEntry(SMqSubActionLogEntry* pEntry); +//int32_t tEncodeSMqSubActionLogEntry(void** buf, const SMqSubActionLogEntry* pEntry); +//void* tDecodeSMqSubActionLogEntry(const void* buf, SMqSubActionLogEntry* pEntry); +// +//typedef struct { +// char key[TSDB_SUBSCRIBE_KEY_LEN]; +// SArray* logs; // SArray +//} SMqSubActionLogObj; +// +//SMqSubActionLogObj* tCloneSMqSubActionLogObj(SMqSubActionLogObj* pLog); +//void tDeleteSMqSubActionLogObj(SMqSubActionLogObj* pLog); +//int32_t tEncodeSMqSubActionLogObj(void** buf, const SMqSubActionLogObj* pLog); +//void* tDecodeSMqSubActionLogObj(const void* buf, SMqSubActionLogObj* pLog); typedef struct { int32_t oldConsumerNum; @@ -643,7 +644,7 @@ typedef struct { SArray* removedConsumers; // SArray SArray* modifyConsumers; // SArray SMqSubscribeObj* pSub; - SMqSubActionLogEntry* pLogEntry; +// SMqSubActionLogEntry* pLogEntry; } SMqRebOutputObj; typedef struct { diff --git a/source/dnode/mnode/impl/inc/mndStream.h b/source/dnode/mnode/impl/inc/mndStream.h index d873df621e8b44dca5adc9b4c4affaddc78730c2..f9f01c77ed942ee318d36c99487f9f7a65418543 100644 --- a/source/dnode/mnode/impl/inc/mndStream.h +++ b/source/dnode/mnode/impl/inc/mndStream.h @@ -40,6 +40,8 @@ int32_t mndPersistDropStreamLog(SMnode *pMnode, STrans *pTrans, SStreamObj *pStr int32_t mndDropStreamByDb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb); +int32_t mndGetNumOfStreams(SMnode *pMnode, char *dbName, int32_t *pNumOfStreams); + #ifdef __cplusplus } #endif diff --git a/source/dnode/mnode/impl/src/mndProfile.c b/source/dnode/mnode/impl/src/mndProfile.c index 06bb46772aee20a7049c5ce47e1b54756911f14c..5482f369409a89484974444de27f111e680c2f10 100644 --- a/source/dnode/mnode/impl/src/mndProfile.c +++ b/source/dnode/mnode/impl/src/mndProfile.c @@ -765,107 +765,129 @@ static int32_t mndRetrieveConns(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl return numOfRows; } -static int32_t mndRetrieveQueries(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows) { - SMnode *pMnode = pReq->info.node; - SSdb *pSdb = pMnode->pSdb; - int32_t numOfRows = 0; - int32_t cols = 0; - SConnObj *pConn = NULL; - - if (pShow->pIter == NULL) { - SProfileMgmt *pMgmt = &pMnode->profileMgmt; - pShow->pIter = taosCacheCreateIter(pMgmt->connCache); +/** + * @param pConn the conn queries pack from + * @param[out] pBlock the block data packed into + * @param offset skip [offset] queries in pConn + * @param rowsToPack at most rows to pack + * @return rows packed +*/ +static int32_t packQueriesIntoBlock(SShowObj* pShow, SConnObj* pConn, SSDataBlock* pBlock, uint32_t offset, uint32_t rowsToPack) { + int32_t cols = 0; + taosRLockLatch(&pConn->queryLock); + int32_t numOfQueries = taosArrayGetSize(pConn->pQueries); + if (NULL == pConn->pQueries || numOfQueries <= offset) { + taosRUnLockLatch(&pConn->queryLock); + return 0; } - while (numOfRows < rows) { - pConn = mndGetNextConn(pMnode, pShow->pIter); - if (pConn == NULL) { - pShow->pIter = NULL; - break; - } + int32_t i = offset; + for (; i < numOfQueries && (i - offset) < rowsToPack; ++i) { + int32_t curRowIndex = pBlock->info.rows; + SQueryDesc *pQuery = taosArrayGet(pConn->pQueries, i); + cols = 0; - taosRLockLatch(&pConn->queryLock); - if (NULL == pConn->pQueries || taosArrayGetSize(pConn->pQueries) <= 0) { - 
taosRUnLockLatch(&pConn->queryLock); - continue; - } + char queryId[26 + VARSTR_HEADER_SIZE] = {0}; + sprintf(&queryId[VARSTR_HEADER_SIZE], "%x:%" PRIx64, pConn->id, pQuery->reqRid); + varDataLen(queryId) = strlen(&queryId[VARSTR_HEADER_SIZE]); + SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + colDataSetVal(pColInfo, curRowIndex, (const char *)queryId, false); + + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + colDataSetVal(pColInfo, curRowIndex, (const char *)&pQuery->queryId, false); - int32_t numOfQueries = taosArrayGetSize(pConn->pQueries); - for (int32_t i = 0; i < numOfQueries && numOfRows < rows; ++i) { - SQueryDesc *pQuery = taosArrayGet(pConn->pQueries, i); - cols = 0; + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + colDataSetVal(pColInfo, curRowIndex, (const char *)&pConn->id, false); - char queryId[26 + VARSTR_HEADER_SIZE] = {0}; - sprintf(&queryId[VARSTR_HEADER_SIZE], "%x:%" PRIx64, pConn->id, pQuery->reqRid); - varDataLen(queryId) = strlen(&queryId[VARSTR_HEADER_SIZE]); - SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - colDataSetVal(pColInfo, numOfRows, (const char *)queryId, false); + char app[TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE]; + STR_TO_VARSTR(app, pConn->app); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + colDataSetVal(pColInfo, curRowIndex, (const char *)app, false); - pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - colDataSetVal(pColInfo, numOfRows, (const char *)&pQuery->queryId, false); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + colDataSetVal(pColInfo, curRowIndex, (const char *)&pConn->pid, false); - pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - colDataSetVal(pColInfo, numOfRows, (const char *)&pConn->id, false); + char user[TSDB_USER_LEN + VARSTR_HEADER_SIZE] = {0}; + STR_TO_VARSTR(user, pConn->user); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + colDataSetVal(pColInfo, curRowIndex, (const char *)user, false); - char app[TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE]; - STR_TO_VARSTR(app, pConn->app); - pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - colDataSetVal(pColInfo, numOfRows, (const char *)app, false); + char endpoint[TSDB_IPv4ADDR_LEN + 6 + VARSTR_HEADER_SIZE] = {0}; + sprintf(&endpoint[VARSTR_HEADER_SIZE], "%s:%d", taosIpStr(pConn->ip), pConn->port); + varDataLen(endpoint) = strlen(&endpoint[VARSTR_HEADER_SIZE]); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + colDataSetVal(pColInfo, curRowIndex, (const char *)endpoint, false); - pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - colDataSetVal(pColInfo, numOfRows, (const char *)&pConn->pid, false); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + colDataSetVal(pColInfo, curRowIndex, (const char *)&pQuery->stime, false); - char user[TSDB_USER_LEN + VARSTR_HEADER_SIZE] = {0}; - STR_TO_VARSTR(user, pConn->user); - pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - colDataSetVal(pColInfo, numOfRows, (const char *)user, false); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + colDataSetVal(pColInfo, curRowIndex, (const char *)&pQuery->useconds, false); - char endpoint[TSDB_IPv4ADDR_LEN + 6 + VARSTR_HEADER_SIZE] = {0}; - sprintf(&endpoint[VARSTR_HEADER_SIZE], "%s:%d", taosIpStr(pConn->ip), pConn->port); - varDataLen(endpoint) = strlen(&endpoint[VARSTR_HEADER_SIZE]); - pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - colDataSetVal(pColInfo, numOfRows, (const char *)endpoint, false); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + 
colDataSetVal(pColInfo, curRowIndex, (const char *)&pQuery->stableQuery, false); - pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - colDataSetVal(pColInfo, numOfRows, (const char *)&pQuery->stime, false); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + colDataSetVal(pColInfo, curRowIndex, (const char *)&pQuery->subPlanNum, false); + + char subStatus[TSDB_SHOW_SUBQUERY_LEN + VARSTR_HEADER_SIZE] = {0}; + int32_t strSize = sizeof(subStatus); + int32_t offset = VARSTR_HEADER_SIZE; + for (int32_t i = 0; i < pQuery->subPlanNum && offset < strSize; ++i) { + if (i) { + offset += snprintf(subStatus + offset, strSize - offset - 1, ","); + } + SQuerySubDesc *pDesc = taosArrayGet(pQuery->subDesc, i); + offset += snprintf(subStatus + offset, strSize - offset - 1, "%" PRIu64 ":%s", pDesc->tid, pDesc->status); + } + varDataLen(subStatus) = strlen(&subStatus[VARSTR_HEADER_SIZE]); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + colDataSetVal(pColInfo, curRowIndex, subStatus, false); - pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - colDataSetVal(pColInfo, numOfRows, (const char *)&pQuery->useconds, false); + char sql[TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE] = {0}; + STR_TO_VARSTR(sql, pQuery->sql); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + colDataSetVal(pColInfo, curRowIndex, (const char *)sql, false); - pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - colDataSetVal(pColInfo, numOfRows, (const char *)&pQuery->stableQuery, false); + pBlock->info.rows++; + } - pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - colDataSetVal(pColInfo, numOfRows, (const char *)&pQuery->isSubQuery, false); + taosRUnLockLatch(&pConn->queryLock); + return i - offset; +} - pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - colDataSetVal(pColInfo, numOfRows, (const char *)&pQuery->subPlanNum, false); +static int32_t mndRetrieveQueries(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows) { + SMnode * pMnode = pReq->info.node; + SSdb * pSdb = pMnode->pSdb; + int32_t numOfRows = 0; + SConnObj *pConn = NULL; - char subStatus[TSDB_SHOW_SUBQUERY_LEN + VARSTR_HEADER_SIZE] = {0}; - int32_t strSize = sizeof(subStatus); - int32_t offset = VARSTR_HEADER_SIZE; - for (int32_t i = 0; i < pQuery->subPlanNum && offset < strSize; ++i) { - if (i) { - offset += snprintf(subStatus + offset, strSize - offset - 1, ","); - } - SQuerySubDesc *pDesc = taosArrayGet(pQuery->subDesc, i); - offset += snprintf(subStatus + offset, strSize - offset - 1, "%" PRIu64 ":%s", pDesc->tid, pDesc->status); - } - varDataLen(subStatus) = strlen(&subStatus[VARSTR_HEADER_SIZE]); - pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - colDataSetVal(pColInfo, numOfRows, subStatus, false); + if (pShow->pIter == NULL) { + SProfileMgmt *pMgmt = &pMnode->profileMgmt; + pShow->pIter = taosCacheCreateIter(pMgmt->connCache); + } - char sql[TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE] = {0}; - STR_TO_VARSTR(sql, pQuery->sql); - pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - colDataSetVal(pColInfo, numOfRows, (const char *)sql, false); + // means fetched some data last time for this conn + if (pShow->curIterPackedRows > 0) { + size_t len = 0; + pConn = taosCacheIterGetData(pShow->pIter, &len); + if (pConn && (taosArrayGetSize(pConn->pQueries) > pShow->curIterPackedRows)) { + numOfRows = packQueriesIntoBlock(pShow, pConn, pBlock, pShow->curIterPackedRows, rows); + pShow->curIterPackedRows += numOfRows; + } + } - numOfRows++; + while (numOfRows < rows) { + pConn = mndGetNextConn(pMnode, pShow->pIter); + if 
(pConn == NULL) { + pShow->pIter = NULL; + break; } - taosRUnLockLatch(&pConn->queryLock); + int32_t packedRows = packQueriesIntoBlock(pShow, pConn, pBlock, 0, rows - numOfRows); + pShow->curIterPackedRows = packedRows; + numOfRows += packedRows; } - pShow->numOfRows += numOfRows; return numOfRows; } diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index 60678f1a348e443690f22e5fe246810ba9e7458d..63f49cfe2be3a17ad99db4fa578c60eea6f7dc48 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -1075,7 +1075,7 @@ int32_t mndDropStreamByDb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb) { return 0; } -static int32_t mndGetNumOfStreams(SMnode *pMnode, char *dbName, int32_t *pNumOfStreams) { +int32_t mndGetNumOfStreams(SMnode *pMnode, char *dbName, int32_t *pNumOfStreams) { SSdb *pSdb = pMnode->pSdb; SDbObj *pDb = mndAcquireDb(pMnode, dbName); if (pDb == NULL) { diff --git a/source/dnode/mnode/impl/src/mndTopic.c b/source/dnode/mnode/impl/src/mndTopic.c index d3cb19231e565df33ffab44d6b749cd0b9416fe7..4bbe531bf8e1bb50598e0a801a0552817084a34e 100644 --- a/source/dnode/mnode/impl/src/mndTopic.c +++ b/source/dnode/mnode/impl/src/mndTopic.c @@ -377,6 +377,10 @@ static int32_t extractTopicTbInfo(SNode *pAst, SMqTopicObj *pTopic) { static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq *pCreate, SDbObj *pDb, const char *userName) { mInfo("start to create topic:%s", pCreate->name); + STrans *pTrans = NULL; + int32_t code = -1; + SNode *pAst = NULL; + SQueryPlan *pPlan = NULL; SMqTopicObj topicObj = {0}; tstrncpy(topicObj.name, pCreate->name, TSDB_TOPIC_FNAME_LEN); @@ -401,7 +405,7 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq * if (pCreate->withMeta) { terrno = TSDB_CODE_MND_INVALID_TOPIC_OPTION; mError("topic:%s, failed to create since %s", pCreate->name, terrstr()); - return -1; + goto _OUT; } topicObj.ast = taosStrdup(pCreate->ast); @@ -409,32 +413,21 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq * qDebugL("topic:%s ast %s", topicObj.name, topicObj.ast); - SNode *pAst = NULL; if (nodesStringToNode(pCreate->ast, &pAst) != 0) { - taosMemoryFree(topicObj.ast); - taosMemoryFree(topicObj.sql); mError("topic:%s, failed to create since %s", pCreate->name, terrstr()); - return -1; + goto _OUT; } - SQueryPlan *pPlan = NULL; - SPlanContext cxt = {.pAstRoot = pAst, .topicQuery = true}; if (qCreateQueryPlan(&cxt, &pPlan, NULL) != 0) { mError("failed to create topic:%s since %s", pCreate->name, terrstr()); - taosMemoryFree(topicObj.ast); - taosMemoryFree(topicObj.sql); - nodesDestroyNode(pAst); - return -1; + goto _OUT; } topicObj.ntbColIds = taosArrayInit(0, sizeof(int16_t)); if (topicObj.ntbColIds == NULL) { - taosMemoryFree(topicObj.ast); - taosMemoryFree(topicObj.sql); - nodesDestroyNode(pAst); terrno = TSDB_CODE_OUT_OF_MEMORY; - return -1; + goto _OUT; } extractTopicTbInfo(pAst, &topicObj); @@ -446,25 +439,18 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq * if (qExtractResultSchema(pAst, &topicObj.schema.nCols, &topicObj.schema.pSchema) != 0) { mError("topic:%s, failed to create since %s", pCreate->name, terrstr()); - taosMemoryFree(topicObj.ast); - taosMemoryFree(topicObj.sql); - nodesDestroyNode(pAst); - return -1; + goto _OUT; } if (nodesNodeToString((SNode *)pPlan, false, &topicObj.physicalPlan, NULL) != 0) { mError("topic:%s, failed to create since %s", pCreate->name, terrstr()); - 
taosMemoryFree(topicObj.ast); - taosMemoryFree(topicObj.sql); - return -1; + goto _OUT; } - nodesDestroyNode(pAst); - nodesDestroyNode((SNode *)pPlan); } else if (pCreate->subType == TOPIC_SUB_TYPE__TABLE) { SStbObj *pStb = mndAcquireStb(pMnode, pCreate->subStbName); if (pStb == NULL) { terrno = TSDB_CODE_MND_STB_NOT_EXIST; - return -1; + goto _OUT; } strcpy(topicObj.stbName, pCreate->subStbName); @@ -483,23 +469,22 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq * /*topicObj.withTbName = 1;*/ /*topicObj.withSchema = 1;*/ - STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq, "create-topic"); + pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_DB_INSIDE, pReq, "create-topic"); if (pTrans == NULL) { mError("topic:%s, failed to create since %s", pCreate->name, terrstr()); - taosMemoryFreeClear(topicObj.ast); - taosMemoryFreeClear(topicObj.sql); - taosMemoryFreeClear(topicObj.physicalPlan); - return -1; + goto _OUT; } + mndTransSetDbName(pTrans, pDb->name, NULL); + if (mndTransCheckConflict(pMnode, pTrans) != 0) { + goto _OUT; + } mInfo("trans:%d to create topic:%s", pTrans->id, pCreate->name); SSdbRaw *pCommitRaw = mndTopicActionEncode(&topicObj); if (pCommitRaw == NULL || mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) { mError("trans:%d, failed to append commit log since %s", pTrans->id, terrstr()); - taosMemoryFreeClear(topicObj.physicalPlan); - mndTransDrop(pTrans); - return -1; + goto _OUT; } (void)sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY); @@ -528,17 +513,16 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq * tEncodeSize(tEncodeSTqCheckInfo, &info, len, code); if (code < 0) { sdbRelease(pSdb, pVgroup); - mndTransDrop(pTrans); - return -1; + goto _OUT; } void *buf = taosMemoryCalloc(1, sizeof(SMsgHead) + len); void *abuf = POINTER_SHIFT(buf, sizeof(SMsgHead)); SEncoder encoder; tEncoderInit(&encoder, abuf, len); if (tEncodeSTqCheckInfo(&encoder, &info) < 0) { + taosMemoryFree(buf); sdbRelease(pSdb, pVgroup); - mndTransDrop(pTrans); - return -1; + goto _OUT; } tEncoderClear(&encoder); ((SMsgHead *)buf)->vgId = htonl(pVgroup->vgId); @@ -551,32 +535,32 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq * if (mndTransAppendRedoAction(pTrans, &action) != 0) { taosMemoryFree(buf); sdbRelease(pSdb, pVgroup); - mndTransDrop(pTrans); - return -1; + goto _OUT; } - + buf = NULL; sdbRelease(pSdb, pVgroup); } } if (mndTransPrepare(pMnode, pTrans) != 0) { mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr()); - taosMemoryFreeClear(topicObj.physicalPlan); - mndTransDrop(pTrans); - return -1; + goto _OUT; } + code = TSDB_CODE_ACTION_IN_PROGRESS; + +_OUT: taosMemoryFreeClear(topicObj.physicalPlan); taosMemoryFreeClear(topicObj.sql); taosMemoryFreeClear(topicObj.ast); taosArrayDestroy(topicObj.ntbColIds); - if (topicObj.schema.nCols) { taosMemoryFreeClear(topicObj.schema.pSchema); } - + nodesDestroyNode(pAst); + nodesDestroyNode((SNode *)pPlan); mndTransDrop(pTrans); - return TSDB_CODE_ACTION_IN_PROGRESS; + return code; } static int32_t mndProcessCreateTopicReq(SRpcMsg *pReq) { diff --git a/source/dnode/mnode/impl/src/mndVgroup.c b/source/dnode/mnode/impl/src/mndVgroup.c index fe588b8f3b10b4110a5c8c843aaab658bee8b175..a82e49f397abe38164e67f89f214c822ae5afdaa 100644 --- a/source/dnode/mnode/impl/src/mndVgroup.c +++ b/source/dnode/mnode/impl/src/mndVgroup.c @@ -20,10 +20,12 @@ #include "mndMnode.h" #include "mndPrivilege.h" #include 
"mndShow.h" +#include "mndStb.h" +#include "mndStream.h" +#include "mndTopic.h" #include "mndTrans.h" #include "mndUser.h" #include "tmisce.h" -#include "mndStb.h" #define VGROUP_VER_NUMBER 1 #define VGROUP_RESERVE_SIZE 64 @@ -2290,6 +2292,24 @@ int32_t mndSplitVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SVgObj *pVgro SDbObj dbObj = {0}; SArray *pArray = mndBuildDnodesArray(pMnode, 0); + int32_t numOfTopics = 0; + if (mndGetNumOfTopics(pMnode, pDb->name, &numOfTopics) != 0) { + goto _OVER; + } + if (numOfTopics > 0) { + terrno = TSDB_CODE_MND_TOPIC_MUST_BE_DELETED; + goto _OVER; + } + + int32_t numOfStreams = 0; + if (mndGetNumOfStreams(pMnode, pDb->name, &numOfStreams) != 0) { + goto _OVER; + } + if (numOfStreams > 0) { + terrno = TSDB_CODE_MND_STREAM_MUST_BE_DELETED; + goto _OVER; + } + pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB, pReq, "split-vgroup"); if (pTrans == NULL) goto _OVER; mndTransSetSerial(pTrans); diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index 9f828d00f5f8ad129e6f768ecce9a37091b11d22..a4dad8f96aa7725b0acc19b24239bc3fa3bd62ee 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -81,54 +81,50 @@ IF (TD_VNODE_PLUGINS) ) ENDIF () -IF (NOT ${TD_LINUX}) -target_include_directories( - vnode - PUBLIC "inc" - PUBLIC "src/inc" - PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar" - PUBLIC "${TD_SOURCE_DIR}/contrib/rocksdb/include" -) -ELSE() -target_include_directories( - vnode - PUBLIC "inc" - PUBLIC "src/inc" - PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar" -) -ENDIF (NOT ${TD_LINUX}) - -IF (TD_LINUX) -target_include_directories( - vnode - PUBLIC "${TD_SOURCE_DIR}/deps/${TD_DEPS_DIR}/rocksdb_static" +# IF (NOT ${TD_LINUX}) +# target_include_directories( +# vnode +# PUBLIC "inc" +# PUBLIC "src/inc" +# PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar" +# PUBLIC "${TD_SOURCE_DIR}/contrib/rocksdb/include" +# ) +# ELSE() +# target_include_directories( +# vnode +# PUBLIC "inc" +# PUBLIC "src/inc" +# PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar" +# ) +#ENDIF(NOT ${TD_LINUX}) + +if (${BUILD_CONTRIB}) + target_include_directories( + vnode + PUBLIC "inc" + PUBLIC "src/inc" + PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar" + PUBLIC "${TD_SOURCE_DIR}/contrib/rocksdb/include" ) - - target_link_directories( - vnode - PUBLIC "${TD_SOURCE_DIR}/deps/${TD_DEPS_DIR}/rocksdb_static" +else() + target_include_directories( + vnode + PUBLIC "inc" + PUBLIC "src/inc" + PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar" ) -target_link_libraries( - vnode - PUBLIC os - PUBLIC util - PUBLIC common - PUBLIC tfs - PUBLIC wal - PUBLIC qworker - PUBLIC sync - PUBLIC executor - PUBLIC scheduler - PUBLIC tdb + if (${TD_LINUX}) + target_include_directories( + vnode + PUBLIC "${TD_SOURCE_DIR}/deps/${TD_DEPS_DIR}/rocksdb_static" + ) + target_link_directories( + vnode + PUBLIC "${TD_SOURCE_DIR}/deps/${TD_DEPS_DIR}/rocksdb_static" + ) + endif() +endif() - # PUBLIC bdb - # PUBLIC scalar - PUBLIC rocksdb - PUBLIC transport - PUBLIC stream - PUBLIC index -) -ELSE() target_link_libraries( vnode PUBLIC os @@ -149,7 +145,6 @@ target_link_libraries( PUBLIC stream PUBLIC index ) -ENDIF() IF (TD_GRANT) TARGET_LINK_LIBRARIES(vnode PUBLIC grant) diff --git a/source/dnode/vnode/src/inc/tsdb.h b/source/dnode/vnode/src/inc/tsdb.h index 9df95a379a47ae15f9014d12ac439b31d35779a4..d7694ebfd5abc9b3cc7cf1a36d3936182025118b 100644 --- a/source/dnode/vnode/src/inc/tsdb.h +++ b/source/dnode/vnode/src/inc/tsdb.h @@ -357,19 +357,25 @@ typedef struct { 
STSchema *pTSchema; } SRocksCache; +typedef struct { + STsdb *pTsdb; + int flush_count; +} SCacheFlushState; + struct STsdb { - char *path; - SVnode *pVnode; - STsdbKeepCfg keepCfg; - TdThreadRwlock rwLock; - SMemTable *mem; - SMemTable *imem; - STsdbFS fs; - SLRUCache *lruCache; - TdThreadMutex lruMutex; - SLRUCache *biCache; - TdThreadMutex biMutex; - SRocksCache rCache; + char *path; + SVnode *pVnode; + STsdbKeepCfg keepCfg; + TdThreadRwlock rwLock; + SMemTable *mem; + SMemTable *imem; + STsdbFS fs; + SLRUCache *lruCache; + SCacheFlushState flushState; + TdThreadMutex lruMutex; + SLRUCache *biCache; + TdThreadMutex biMutex; + SRocksCache rCache; }; struct TSDBKEY { diff --git a/source/dnode/vnode/src/meta/metaCache.c b/source/dnode/vnode/src/meta/metaCache.c index 8749b3ac9464ed0e446e59e7db409bc09d3d600d..078e6ee6af08641f45c4ded20597047159352306 100644 --- a/source/dnode/vnode/src/meta/metaCache.c +++ b/source/dnode/vnode/src/meta/metaCache.c @@ -151,7 +151,6 @@ int32_t metaCacheOpen(SMeta* pMeta) { taosHashSetFreeFp(pCache->sTagFilterResCache.pTableEntry, freeCacheEntryFp); taosThreadMutexInit(&pCache->sTagFilterResCache.lock, NULL); - pCache->STbGroupResCache.pResCache = taosLRUCacheInit(5 * 1024 * 1024, -1, 0.5); if (pCache->STbGroupResCache.pResCache == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; @@ -169,7 +168,6 @@ int32_t metaCacheOpen(SMeta* pMeta) { taosHashSetFreeFp(pCache->STbGroupResCache.pTableEntry, freeCacheEntryFp); taosThreadMutexInit(&pCache->STbGroupResCache.lock, NULL); - pMeta->pCache = pCache; return code; @@ -486,14 +484,14 @@ static int checkAllEntriesInCache(const STagFilterResEntry* pEntry, SArray* pInv } static FORCE_INLINE void setMD5DigestInKey(uint64_t* pBuf, const char* key, int32_t keyLen) { -// ASSERT(keyLen == sizeof(int64_t) * 2); + // ASSERT(keyLen == sizeof(int64_t) * 2); memcpy(&pBuf[2], key, keyLen); } // the format of key: // hash table address(8bytes) + suid(8bytes) + MD5 digest(16bytes) static void initCacheKey(uint64_t* buf, const SHashObj* pHashMap, uint64_t suid, const char* key, int32_t keyLen) { - buf[0] = (uint64_t) pHashMap; + buf[0] = (uint64_t)pHashMap; buf[1] = suid; setMD5DigestInKey(buf, key, keyLen); ASSERT(keyLen == sizeof(uint64_t) * 2); @@ -501,7 +499,7 @@ static void initCacheKey(uint64_t* buf, const SHashObj* pHashMap, uint64_t suid, int32_t metaGetCachedTableUidList(void* pVnode, tb_uid_t suid, const uint8_t* pKey, int32_t keyLen, SArray* pList1, bool* acquireRes) { - SMeta* pMeta = ((SVnode*)pVnode)->pMeta; + SMeta* pMeta = ((SVnode*)pVnode)->pMeta; int32_t vgId = TD_VID(pMeta->pVnode); // generate the composed key for LRU cache @@ -541,7 +539,8 @@ int32_t metaGetCachedTableUidList(void* pVnode, tb_uid_t suid, const uint8_t* pK uint32_t acc = pMeta->pCache->sTagFilterResCache.accTimes; if ((*pEntry)->hitTimes % 5000 == 0 && (*pEntry)->hitTimes > 0) { - metaInfo("vgId:%d cache hit:%d, total acc:%d, rate:%.2f", vgId, (*pEntry)->hitTimes, acc, ((double)(*pEntry)->hitTimes) / acc); + metaInfo("vgId:%d cache hit:%d, total acc:%d, rate:%.2f", vgId, (*pEntry)->hitTimes, acc, + ((double)(*pEntry)->hitTimes) / acc); } taosLRUCacheRelease(pCache, pHandle, false); @@ -551,7 +550,8 @@ int32_t metaGetCachedTableUidList(void* pVnode, tb_uid_t suid, const uint8_t* pK return TSDB_CODE_SUCCESS; } -static void freeUidCachePayload(const void* key, size_t keyLen, void* value) { +static void freeUidCachePayload(const void* key, size_t keyLen, void* value, void* ud) { + (void)ud; if (value == NULL) { return; } @@ -607,7 +607,7 @@ static int32_t 
addNewEntry(SHashObj* pTableEntry, const void* pKey, int32_t keyL int32_t metaUidFilterCachePut(void* pVnode, uint64_t suid, const void* pKey, int32_t keyLen, void* pPayload, int32_t payloadLen, double selectivityRatio) { int32_t code = 0; - SMeta* pMeta = ((SVnode*)pVnode)->pMeta; + SMeta* pMeta = ((SVnode*)pVnode)->pMeta; int32_t vgId = TD_VID(pMeta->pVnode); if (selectivityRatio > tsSelectivityRatio) { @@ -640,7 +640,7 @@ int32_t metaUidFilterCachePut(void* pVnode, uint64_t suid, const void* pKey, int if (code != TSDB_CODE_SUCCESS) { goto _end; } - } else { // check if it exists or not + } else { // check if it exists or not size_t size = listNEles(&(*pEntry)->list); if (size == 0) { tdListAppend(&(*pEntry)->list, pKey); @@ -659,7 +659,7 @@ int32_t metaUidFilterCachePut(void* pVnode, uint64_t suid, const void* pKey, int // add to cache. taosLRUCacheInsert(pCache, key, TAG_FILTER_RES_KEY_LEN, pPayload, payloadLen, freeUidCachePayload, NULL, - TAOS_LRU_PRIORITY_LOW); + TAOS_LRU_PRIORITY_LOW, NULL); _end: taosThreadMutexUnlock(pLock); metaDebug("vgId:%d, suid:%" PRIu64 " list cache added into cache, total:%d, tables:%d", vgId, suid, @@ -675,7 +675,7 @@ int32_t metaUidCacheClear(SMeta* pMeta, uint64_t suid) { SHashObj* pEntryHashMap = pMeta->pCache->sTagFilterResCache.pTableEntry; uint64_t dummy[2] = {0}; - initCacheKey(p, pEntryHashMap, suid, (char*) &dummy[0], 16); + initCacheKey(p, pEntryHashMap, suid, (char*)&dummy[0], 16); TdThreadMutex* pLock = &pMeta->pCache->sTagFilterResCache.lock; taosThreadMutexLock(pLock); @@ -700,12 +700,12 @@ int32_t metaUidCacheClear(SMeta* pMeta, uint64_t suid) { tdListEmpty(&(*pEntry)->list); taosThreadMutexUnlock(pLock); - metaDebug("vgId:%d suid:%"PRId64" cached related tag filter uid list cleared", vgId, suid); + metaDebug("vgId:%d suid:%" PRId64 " cached related tag filter uid list cleared", vgId, suid); return TSDB_CODE_SUCCESS; } int32_t metaGetCachedTbGroup(void* pVnode, tb_uid_t suid, const uint8_t* pKey, int32_t keyLen, SArray** pList) { - SMeta* pMeta = ((SVnode*)pVnode)->pMeta; + SMeta* pMeta = ((SVnode*)pVnode)->pMeta; int32_t vgId = TD_VID(pMeta->pVnode); // generate the composed key for LRU cache @@ -738,7 +738,8 @@ int32_t metaGetCachedTbGroup(void* pVnode, tb_uid_t suid, const uint8_t* pKey, i uint32_t acc = pMeta->pCache->STbGroupResCache.accTimes; if ((*pEntry)->hitTimes % 5000 == 0 && (*pEntry)->hitTimes > 0) { - metaInfo("vgId:%d tb group cache hit:%d, total acc:%d, rate:%.2f", vgId, (*pEntry)->hitTimes, acc, ((double)(*pEntry)->hitTimes) / acc); + metaInfo("vgId:%d tb group cache hit:%d, total acc:%d, rate:%.2f", vgId, (*pEntry)->hitTimes, acc, + ((double)(*pEntry)->hitTimes) / acc); } taosLRUCacheRelease(pCache, pHandle, false); @@ -748,8 +749,8 @@ int32_t metaGetCachedTbGroup(void* pVnode, tb_uid_t suid, const uint8_t* pKey, i return TSDB_CODE_SUCCESS; } - -static void freeTbGroupCachePayload(const void* key, size_t keyLen, void* value) { +static void freeTbGroupCachePayload(const void* key, size_t keyLen, void* value, void* ud) { + (void)ud; if (value == NULL) { return; } @@ -778,8 +779,8 @@ static void freeTbGroupCachePayload(const void* key, size_t keyLen, void* value) taosMemoryFree(tmp); double el = (taosGetTimestampUs() - st) / 1000.0; - metaDebug("clear one item in tb group cache, remain cached item:%d, elapsed time:%.2fms", listNEles(&((*pEntry)->list)), - el); + metaDebug("clear one item in tb group cache, remain cached item:%d, elapsed time:%.2fms", + listNEles(&((*pEntry)->list)), el); break; } } @@ -788,11 +789,10 @@ 
static void freeTbGroupCachePayload(const void* key, size_t keyLen, void* value) taosArrayDestroy((SArray*)value); } - int32_t metaPutTbGroupToCache(void* pVnode, uint64_t suid, const void* pKey, int32_t keyLen, void* pPayload, int32_t payloadLen) { int32_t code = 0; - SMeta* pMeta = ((SVnode*)pVnode)->pMeta; + SMeta* pMeta = ((SVnode*)pVnode)->pMeta; int32_t vgId = TD_VID(pMeta->pVnode); if (payloadLen > tsTagFilterResCacheSize) { @@ -817,7 +817,7 @@ int32_t metaPutTbGroupToCache(void* pVnode, uint64_t suid, const void* pKey, int if (code != TSDB_CODE_SUCCESS) { goto _end; } - } else { // check if it exists or not + } else { // check if it exists or not size_t size = listNEles(&(*pEntry)->list); if (size == 0) { tdListAppend(&(*pEntry)->list, pKey); @@ -836,7 +836,7 @@ int32_t metaPutTbGroupToCache(void* pVnode, uint64_t suid, const void* pKey, int // add to cache. taosLRUCacheInsert(pCache, key, TAG_FILTER_RES_KEY_LEN, pPayload, payloadLen, freeTbGroupCachePayload, NULL, - TAOS_LRU_PRIORITY_LOW); + TAOS_LRU_PRIORITY_LOW, NULL); _end: taosThreadMutexUnlock(pLock); metaDebug("vgId:%d, suid:%" PRIu64 " tb group added into cache, total:%d, tables:%d", vgId, suid, @@ -852,7 +852,7 @@ int32_t metaTbGroupCacheClear(SMeta* pMeta, uint64_t suid) { SHashObj* pEntryHashMap = pMeta->pCache->STbGroupResCache.pTableEntry; uint64_t dummy[2] = {0}; - initCacheKey(p, pEntryHashMap, suid, (char*) &dummy[0], 16); + initCacheKey(p, pEntryHashMap, suid, (char*)&dummy[0], 16); TdThreadMutex* pLock = &pMeta->pCache->STbGroupResCache.lock; taosThreadMutexLock(pLock); @@ -877,8 +877,6 @@ int32_t metaTbGroupCacheClear(SMeta* pMeta, uint64_t suid) { tdListEmpty(&(*pEntry)->list); taosThreadMutexUnlock(pLock); - metaDebug("vgId:%d suid:%"PRId64" cached related tb group cleared", vgId, suid); + metaDebug("vgId:%d suid:%" PRId64 " cached related tb group cleared", vgId, suid); return TSDB_CODE_SUCCESS; } - - diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index de750aaa39cbbcfb304d0fe1f81da664377f45d7..f19fa54cbd85c3f6bda19f25a251950564cc50a6 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -706,7 +706,6 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg tqError("vgId:%d, build new consumer handle %s for consumer:0x%" PRIx64 ", but old consumerId:0x%" PRIx64, req.vgId, req.subKey, req.newConsumerId, req.oldConsumerId); } - if (req.newConsumerId == -1) { tqError("vgId:%d, tq invalid re-balance request, new consumerId %" PRId64 "", req.vgId, req.newConsumerId); goto end; diff --git a/source/dnode/vnode/src/tq/tqMeta.c b/source/dnode/vnode/src/tq/tqMeta.c index 900bb941213b94438a03c2430c2d84fd9eacde26..df1c9ca7c9cb82b7266604efb74b24fe86a88461 100644 --- a/source/dnode/vnode/src/tq/tqMeta.c +++ b/source/dnode/vnode/src/tq/tqMeta.c @@ -405,7 +405,7 @@ int32_t tqMetaRestoreHandle(STQ* pTq) { while (tdbTbcNext(pCur, &pKey, &kLen, &pVal, &vLen) == 0) { STqHandle handle = {0}; code = restoreHandle(pTq, pVal, vLen, &handle); - if (code < 0){ + if (code < 0) { tqDestroyTqHandle(&handle); break; } diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c index 4ec66f82a60a7592eeb0eb29030e637b764446d3..31b13b8411127cfc742ed78bfcc56191126b93fc 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCache.c +++ b/source/dnode/vnode/src/tsdb/tsdbCache.c @@ -14,6 +14,8 @@ */ #include "tsdb.h" +#define ROCKS_BATCH_SIZE (4096) + static int32_t tsdbOpenBICache(STsdb *pTsdb) { int32_t code = 0; SLRUCache *pCache = 
taosLRUCacheInit(10 * 1024 * 1024, 0, .5); @@ -213,7 +215,7 @@ static void tsdbCloseRocksCache(STsdb *pTsdb) { } static void rocksMayWrite(STsdb *pTsdb, bool force, bool read, bool lock) { - rocksdb_writebatch_t *wb = NULL; + rocksdb_writebatch_t *wb = pTsdb->rCache.writebatch; if (read) { if (lock) { taosThreadMutexLock(&pTsdb->lruMutex); @@ -223,44 +225,33 @@ static void rocksMayWrite(STsdb *pTsdb, bool force, bool read, bool lock) { if (lock) { taosThreadMutexLock(&pTsdb->rCache.rMutex); } - wb = pTsdb->rCache.writebatch; } + int count = rocksdb_writebatch_count(wb); - if ((force && count > 0) || count >= 1024) { + if ((force && count > 0) || count >= ROCKS_BATCH_SIZE) { char *err = NULL; + rocksdb_write(pTsdb->rCache.db, pTsdb->rCache.writeoptions, wb, &err); if (NULL != err) { tsdbError("vgId:%d, %s failed at line %d, count: %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, count, err); rocksdb_free(err); + // pTsdb->flushState.flush_count = 0; } rocksdb_writebatch_clear(wb); } - if (read) { - if (lock) taosThreadMutexUnlock(&pTsdb->lruMutex); - } else { - if (lock) taosThreadMutexUnlock(&pTsdb->rCache.rMutex); - } -} -int32_t tsdbCacheCommit(STsdb *pTsdb) { - int32_t code = 0; - char *err = NULL; - - rocksMayWrite(pTsdb, true, false, true); - rocksMayWrite(pTsdb, true, true, true); - rocksdb_flush(pTsdb->rCache.db, pTsdb->rCache.flushoptions, &err); - if (NULL != err) { - tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, err); - rocksdb_free(err); - code = -1; + if (lock) { + if (read) { + taosThreadMutexUnlock(&pTsdb->lruMutex); + } else { + taosThreadMutexUnlock(&pTsdb->rCache.rMutex); + } } - - return code; } -SLastCol *tsdbCacheDeserialize(char const *value) { +static SLastCol *tsdbCacheDeserialize(char const *value) { if (!value) { return NULL; } @@ -278,7 +269,7 @@ SLastCol *tsdbCacheDeserialize(char const *value) { return pLastCol; } -void tsdbCacheSerialize(SLastCol *pLastCol, char **value, size_t *size) { +static void tsdbCacheSerialize(SLastCol *pLastCol, char **value, size_t *size) { SColVal *pColVal = &pLastCol->colVal; size_t length = sizeof(*pLastCol); if (IS_VAR_DATA_TYPE(pColVal->type)) { @@ -300,6 +291,77 @@ void tsdbCacheSerialize(SLastCol *pLastCol, char **value, size_t *size) { *size = length; } +static void tsdbCachePutBatch(SLastCol *pLastCol, const void *key, size_t klen, SCacheFlushState *state) { + STsdb *pTsdb = state->pTsdb; + SRocksCache *rCache = &pTsdb->rCache; + rocksdb_writebatch_t *wb = rCache->writebatch; + char *rocks_value = NULL; + size_t vlen = 0; + + tsdbCacheSerialize(pLastCol, &rocks_value, &vlen); + + taosThreadMutexLock(&rCache->rMutex); + + rocksdb_writebatch_put(wb, (char *)key, klen, rocks_value, vlen); + + taosMemoryFree(rocks_value); + + if (++state->flush_count >= ROCKS_BATCH_SIZE) { + char *err = NULL; + + rocksdb_write(rCache->db, rCache->writeoptions, wb, &err); + if (NULL != err) { + tsdbError("vgId:%d, %s failed at line %d, count: %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + state->flush_count, err); + rocksdb_free(err); + } + + rocksdb_writebatch_clear(wb); + + state->flush_count = 0; + } + + taosThreadMutexUnlock(&rCache->rMutex); +} + +int tsdbCacheFlushDirty(const void *key, size_t klen, void *value, void *ud) { + SLastCol *pLastCol = (SLastCol *)value; + + if (pLastCol->dirty) { + tsdbCachePutBatch(pLastCol, key, klen, (SCacheFlushState *)ud); + + pLastCol->dirty = 0; + } + + return 0; +} + +int32_t tsdbCacheCommit(STsdb *pTsdb) { + int32_t code = 0; + char 
*err = NULL; + + SLRUCache *pCache = pTsdb->lruCache; + rocksdb_writebatch_t *wb = pTsdb->rCache.writebatch; + + taosThreadMutexLock(&pTsdb->lruMutex); + + taosLRUCacheApply(pCache, tsdbCacheFlushDirty, &pTsdb->flushState); + + rocksMayWrite(pTsdb, true, false, false); + rocksMayWrite(pTsdb, true, true, false); + rocksdb_flush(pTsdb->rCache.db, pTsdb->rCache.flushoptions, &err); + + taosThreadMutexUnlock(&pTsdb->lruMutex); + + if (NULL != err) { + tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, err); + rocksdb_free(err); + code = -1; + } + + return code; +} + static SLastCol *tsdbCacheLookup(STsdb *pTsdb, tb_uid_t uid, int16_t cid, int8_t ltype) { SLastCol *pLastCol = NULL; @@ -329,21 +391,25 @@ static void reallocVarData(SColVal *pColVal) { } } -static void tsdbCacheDeleter(const void *key, size_t keyLen, void *value) { +static void tsdbCacheDeleter(const void *key, size_t klen, void *value, void *ud) { SLastCol *pLastCol = (SLastCol *)value; - // TODO: add dirty flag to SLastCol if (pLastCol->dirty) { - // TODO: queue into dirty list, free it after save to backstore - } else { - if (IS_VAR_DATA_TYPE(pLastCol->colVal.type) /* && pLastCol->colVal.value.nData > 0*/) { - taosMemoryFree(pLastCol->colVal.value.pData); - } + tsdbCachePutBatch(pLastCol, key, klen, (SCacheFlushState *)ud); + } - taosMemoryFree(value); + if (IS_VAR_DATA_TYPE(pLastCol->colVal.type) /* && pLastCol->colVal.value.nData > 0*/) { + taosMemoryFree(pLastCol->colVal.value.pData); } + + taosMemoryFree(value); } +typedef struct { + int idx; + SLastKey key; +} SIdxKey; + int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSDBROW *pRow) { int32_t code = 0; @@ -370,113 +436,206 @@ int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSDBROW *pRow tsdbRowClose(&iter); // 3, build keys & multi get from rocks - int num_keys = TARRAY_SIZE(aColVal); - char **keys_list = taosMemoryCalloc(num_keys * 2, sizeof(char *)); - size_t *keys_list_sizes = taosMemoryCalloc(num_keys * 2, sizeof(size_t)); - char *key_list = taosMemoryMalloc(num_keys * ROCKS_KEY_LEN * 2); + int num_keys = TARRAY_SIZE(aColVal); + TSKEY keyTs = TSDBROW_TS(pRow); + SArray *remainCols = NULL; + SLRUCache *pCache = pTsdb->lruCache; + + taosThreadMutexLock(&pTsdb->lruMutex); for (int i = 0; i < num_keys; ++i) { SColVal *pColVal = (SColVal *)taosArrayGet(aColVal, i); int16_t cid = pColVal->cid; - memcpy(key_list + i * ROCKS_KEY_LEN, &(SLastKey){.ltype = 1, .uid = uid, .cid = cid}, ROCKS_KEY_LEN); - memcpy(key_list + i * ROCKS_KEY_LEN + num_keys * ROCKS_KEY_LEN, &(SLastKey){.ltype = 0, .uid = uid, .cid = cid}, - ROCKS_KEY_LEN); - keys_list[i] = key_list + i * ROCKS_KEY_LEN; - keys_list[num_keys + i] = key_list + i * ROCKS_KEY_LEN + num_keys * ROCKS_KEY_LEN; - keys_list_sizes[i] = ROCKS_KEY_LEN; - keys_list_sizes[num_keys + i] = ROCKS_KEY_LEN; - } - char **values_list = taosMemoryCalloc(num_keys * 2, sizeof(char *)); - size_t *values_list_sizes = taosMemoryCalloc(num_keys * 2, sizeof(size_t)); - char **errs = taosMemoryCalloc(num_keys * 2, sizeof(char *)); - taosThreadMutexLock(&pTsdb->rCache.rMutex); - rocksdb_multi_get(pTsdb->rCache.db, pTsdb->rCache.readoptions, num_keys * 2, (const char *const *)keys_list, - keys_list_sizes, values_list, values_list_sizes, errs); - for (int i = 0; i < num_keys * 2; ++i) { - rocksdb_free(errs[i]); - } - taosMemoryFree(key_list); - taosMemoryFree(keys_list); - taosMemoryFree(keys_list_sizes); - taosMemoryFree(errs); + SLastKey *key = &(SLastKey){.ltype = 0, .uid 
= uid, .cid = cid}; + size_t klen = ROCKS_KEY_LEN; + LRUHandle *h = taosLRUCacheLookup(pCache, key, klen); + if (h) { + SLastCol *pLastCol = (SLastCol *)taosLRUCacheValue(pCache, h); - TSKEY keyTs = TSDBROW_TS(pRow); - rocksdb_writebatch_t *wb = pTsdb->rCache.writebatch; - for (int i = 0; i < num_keys; ++i) { - SColVal *pColVal = (SColVal *)taosArrayGet(aColVal, i); + if (pLastCol->ts <= keyTs) { + uint8_t *pVal = NULL; + int nData = pLastCol->colVal.value.nData; + if (IS_VAR_DATA_TYPE(pColVal->type)) { + pVal = pLastCol->colVal.value.pData; + } + pLastCol->ts = keyTs; + pLastCol->colVal = *pColVal; + if (IS_VAR_DATA_TYPE(pColVal->type)) { + if (nData < pColVal->value.nData) { + taosMemoryFree(pVal); + pLastCol->colVal.value.pData = taosMemoryCalloc(1, pColVal->value.nData); + } else { + pLastCol->colVal.value.pData = pVal; + } + if (pColVal->value.nData) { + memcpy(pLastCol->colVal.value.pData, pColVal->value.pData, pColVal->value.nData); + } + } - // if (!COL_VAL_IS_NONE(pColVal)) { - SLastCol *pLastCol = tsdbCacheDeserialize(values_list[i + num_keys]); + if (!pLastCol->dirty) { + pLastCol->dirty = 1; + } + } - if (NULL == pLastCol || pLastCol->ts <= keyTs) { - char *value = NULL; - size_t vlen = 0; - tsdbCacheSerialize(&(SLastCol){.ts = keyTs, .colVal = *pColVal}, &value, &vlen); - SLastKey key = (SLastKey){.ltype = 0, .uid = uid, .cid = pColVal->cid}; - size_t klen = ROCKS_KEY_LEN; - rocksdb_writebatch_put(wb, (char *)&key, klen, value, vlen); + taosLRUCacheRelease(pCache, h, false); + } else { + if (!remainCols) { + remainCols = taosArrayInit(num_keys * 2, sizeof(SIdxKey)); + } + taosArrayPush(remainCols, &(SIdxKey){i, *key}); + } - pLastCol = (SLastCol *)value; - SLastCol *pTmpLastCol = taosMemoryCalloc(1, sizeof(SLastCol)); - *pTmpLastCol = *pLastCol; - pLastCol = pTmpLastCol; + if (COL_VAL_IS_VALUE(pColVal)) { + key->ltype = 1; + LRUHandle *h = taosLRUCacheLookup(pCache, key, klen); + if (h) { + SLastCol *pLastCol = (SLastCol *)taosLRUCacheValue(pCache, h); - reallocVarData(&pLastCol->colVal); - size_t charge = sizeof(*pLastCol); - if (IS_VAR_DATA_TYPE(pLastCol->colVal.type)) { - charge += pLastCol->colVal.value.nData; - } + if (pLastCol->ts <= keyTs) { + uint8_t *pVal = NULL; + int nData = pLastCol->colVal.value.nData; + if (IS_VAR_DATA_TYPE(pColVal->type)) { + pVal = pLastCol->colVal.value.pData; + } + pLastCol->ts = keyTs; + pLastCol->colVal = *pColVal; + if (IS_VAR_DATA_TYPE(pColVal->type)) { + if (nData < pColVal->value.nData) { + taosMemoryFree(pVal); + pLastCol->colVal.value.pData = taosMemoryCalloc(1, pColVal->value.nData); + } else { + pLastCol->colVal.value.pData = pVal; + } + if (pColVal->value.nData) { + memcpy(pLastCol->colVal.value.pData, pColVal->value.pData, pColVal->value.nData); + } + } - LRUStatus status = taosLRUCacheInsert(pTsdb->lruCache, &key, ROCKS_KEY_LEN, pLastCol, charge, tsdbCacheDeleter, - NULL, TAOS_LRU_PRIORITY_LOW); - if (status != TAOS_LRU_STATUS_OK) { - code = -1; + if (!pLastCol->dirty) { + pLastCol->dirty = 1; + } + } + + taosLRUCacheRelease(pCache, h, false); + } else { + if (!remainCols) { + remainCols = taosArrayInit(num_keys * 2, sizeof(SIdxKey)); + } + taosArrayPush(remainCols, &(SIdxKey){i, *key}); } + } + } - taosMemoryFree(value); + if (remainCols) { + num_keys = TARRAY_SIZE(remainCols); + } + if (remainCols && num_keys > 0) { + char **keys_list = taosMemoryCalloc(num_keys, sizeof(char *)); + size_t *keys_list_sizes = taosMemoryCalloc(num_keys, sizeof(size_t)); + for (int i = 0; i < num_keys; ++i) { + SIdxKey *idxKey = &((SIdxKey 
*)TARRAY_DATA(remainCols))[i]; + + keys_list[i] = (char *)&idxKey->key; + keys_list_sizes[i] = ROCKS_KEY_LEN; + } + char **values_list = taosMemoryCalloc(num_keys, sizeof(char *)); + size_t *values_list_sizes = taosMemoryCalloc(num_keys, sizeof(size_t)); + char **errs = taosMemoryCalloc(num_keys, sizeof(char *)); + rocksdb_multi_get(pTsdb->rCache.db, pTsdb->rCache.readoptions, num_keys, (const char *const *)keys_list, + keys_list_sizes, values_list, values_list_sizes, errs); + for (int i = 0; i < num_keys; ++i) { + rocksdb_free(errs[i]); } + taosMemoryFree(errs); + taosMemoryFree(keys_list); + taosMemoryFree(keys_list_sizes); + taosMemoryFree(values_list_sizes); + + rocksdb_writebatch_t *wb = pTsdb->rCache.writebatch; + for (int i = 0; i < num_keys; ++i) { + SIdxKey *idxKey = &((SIdxKey *)TARRAY_DATA(remainCols))[i]; + SColVal *pColVal = (SColVal *)TARRAY_DATA(aColVal) + idxKey->idx; + // SColVal *pColVal = (SColVal *)taosArrayGet(aColVal, idxKey->idx); - if (COL_VAL_IS_VALUE(pColVal)) { SLastCol *pLastCol = tsdbCacheDeserialize(values_list[i]); - if (NULL == pLastCol || pLastCol->ts <= keyTs) { - char *value = NULL; - size_t vlen = 0; - tsdbCacheSerialize(&(SLastCol){.ts = keyTs, .colVal = *pColVal}, &value, &vlen); - SLastKey key = (SLastKey){.ltype = 1, .uid = uid, .cid = pColVal->cid}; + if (idxKey->key.ltype == 0) { + if (NULL == pLastCol || pLastCol->ts <= keyTs) { + char *value = NULL; + size_t vlen = 0; + tsdbCacheSerialize(&(SLastCol){.ts = keyTs, .colVal = *pColVal}, &value, &vlen); + // SLastKey key = (SLastKey){.ltype = 0, .uid = uid, .cid = pColVal->cid}; + taosThreadMutexLock(&pTsdb->rCache.rMutex); - rocksdb_writebatch_put(wb, (char *)&key, ROCKS_KEY_LEN, value, vlen); + rocksdb_writebatch_put(wb, (char *)&idxKey->key, ROCKS_KEY_LEN, value, vlen); - pLastCol = (SLastCol *)value; - SLastCol *pTmpLastCol = taosMemoryCalloc(1, sizeof(SLastCol)); - *pTmpLastCol = *pLastCol; - pLastCol = pTmpLastCol; + taosThreadMutexUnlock(&pTsdb->rCache.rMutex); - reallocVarData(&pLastCol->colVal); - size_t charge = sizeof(*pLastCol); - if (IS_VAR_DATA_TYPE(pLastCol->colVal.type)) { - charge += pLastCol->colVal.value.nData; - } + pLastCol = (SLastCol *)value; + SLastCol *pTmpLastCol = taosMemoryCalloc(1, sizeof(SLastCol)); + *pTmpLastCol = *pLastCol; + pLastCol = pTmpLastCol; - LRUStatus status = taosLRUCacheInsert(pTsdb->lruCache, &key, ROCKS_KEY_LEN, pLastCol, charge, tsdbCacheDeleter, - NULL, TAOS_LRU_PRIORITY_LOW); - if (status != TAOS_LRU_STATUS_OK) { - code = -1; + reallocVarData(&pLastCol->colVal); + size_t charge = sizeof(*pLastCol); + if (IS_VAR_DATA_TYPE(pLastCol->colVal.type)) { + charge += pLastCol->colVal.value.nData; + } + + LRUStatus status = taosLRUCacheInsert(pTsdb->lruCache, &idxKey->key, ROCKS_KEY_LEN, pLastCol, charge, + tsdbCacheDeleter, NULL, TAOS_LRU_PRIORITY_LOW, &pTsdb->flushState); + if (status != TAOS_LRU_STATUS_OK) { + code = -1; + } + + taosMemoryFree(value); } + } else { + if (COL_VAL_IS_VALUE(pColVal)) { + if (NULL == pLastCol || pLastCol->ts <= keyTs) { + char *value = NULL; + size_t vlen = 0; + tsdbCacheSerialize(&(SLastCol){.ts = keyTs, .colVal = *pColVal}, &value, &vlen); + // SLastKey key = (SLastKey){.ltype = 1, .uid = uid, .cid = pColVal->cid}; + taosThreadMutexLock(&pTsdb->rCache.rMutex); + + rocksdb_writebatch_put(wb, (char *)&idxKey->key, ROCKS_KEY_LEN, value, vlen); + + taosThreadMutexUnlock(&pTsdb->rCache.rMutex); + + pLastCol = (SLastCol *)value; + SLastCol *pTmpLastCol = taosMemoryCalloc(1, sizeof(SLastCol)); + *pTmpLastCol = *pLastCol; + pLastCol = 
pTmpLastCol; + + reallocVarData(&pLastCol->colVal); + size_t charge = sizeof(*pLastCol); + if (IS_VAR_DATA_TYPE(pLastCol->colVal.type)) { + charge += pLastCol->colVal.value.nData; + } - taosMemoryFree(value); + LRUStatus status = taosLRUCacheInsert(pTsdb->lruCache, &idxKey->key, ROCKS_KEY_LEN, pLastCol, charge, + tsdbCacheDeleter, NULL, TAOS_LRU_PRIORITY_LOW, &pTsdb->flushState); + if (status != TAOS_LRU_STATUS_OK) { + code = -1; + } + + taosMemoryFree(value); + } + } } + + rocksdb_free(values_list[i]); } - //} - rocksdb_free(values_list[i]); - rocksdb_free(values_list[i + num_keys]); + rocksMayWrite(pTsdb, true, false, true); + + taosMemoryFree(values_list); + + taosArrayDestroy(remainCols); } - taosMemoryFree(values_list); - taosMemoryFree(values_list_sizes); - rocksMayWrite(pTsdb, true, false, false); - taosThreadMutexUnlock(&pTsdb->rCache.rMutex); + taosThreadMutexUnlock(&pTsdb->lruMutex); _exit: taosArrayDestroy(aColVal); @@ -651,11 +810,6 @@ static SLastCol *tsdbCacheLoadCol(STsdb *pTsdb, SCacheRowsReader *pr, int16_t sl return pLastCol; } -typedef struct { - int idx; - SLastKey key; -} SIdxKey; - static int32_t tsdbCacheLoadFromRaw(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SArray *remainCols, SCacheRowsReader *pr, int8_t ltype) { int32_t code = 0; @@ -712,7 +866,7 @@ static int32_t tsdbCacheLoadFromRaw(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArr } LRUStatus status = taosLRUCacheInsert(pCache, &idxKey->key, ROCKS_KEY_LEN, pLastCol, charge, tsdbCacheDeleter, NULL, - TAOS_LRU_PRIORITY_LOW); + TAOS_LRU_PRIORITY_LOW, &pTsdb->flushState); if (status != TAOS_LRU_STATUS_OK) { code = -1; } @@ -787,7 +941,7 @@ static int32_t tsdbCacheLoadFromRocks(STsdb *pTsdb, tb_uid_t uid, SArray *pLastA } LRUStatus status = taosLRUCacheInsert(pCache, &idxKey->key, ROCKS_KEY_LEN, pLastCol, charge, tsdbCacheDeleter, - NULL, TAOS_LRU_PRIORITY_LOW); + NULL, TAOS_LRU_PRIORITY_LOW, &pTsdb->flushState); if (status != TAOS_LRU_STATUS_OK) { code = -1; } @@ -833,9 +987,7 @@ int32_t tsdbCacheGetBatch(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCache reallocVarData(&lastCol.colVal); taosArrayPush(pLastArray, &lastCol); - if (h) { - taosLRUCacheRelease(pCache, h, false); - } + taosLRUCacheRelease(pCache, h, false); } else { SLastCol noneCol = {.ts = TSKEY_MIN, .colVal = COL_VAL_NONE(cid, pr->pSchema->columns[pr->pSlotIds[i]].type)}; @@ -860,9 +1012,7 @@ int32_t tsdbCacheGetBatch(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCache reallocVarData(&lastCol.colVal); taosArraySet(pLastArray, idxKey->idx, &lastCol); - if (h) { - taosLRUCacheRelease(pCache, h, false); - } + taosLRUCacheRelease(pCache, h, false); taosArrayRemove(remainCols, i); } else { @@ -906,7 +1056,7 @@ int32_t tsdbCacheGet(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCacheRowsR } LRUStatus status = taosLRUCacheInsert(pCache, key, ROCKS_KEY_LEN, pLastCol, charge, tsdbCacheDeleter, &h, - TAOS_LRU_PRIORITY_LOW); + TAOS_LRU_PRIORITY_LOW, &pTsdb->flushState); if (status != TAOS_LRU_STATUS_OK) { code = -1; } @@ -965,6 +1115,8 @@ int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKE rocksMayWrite(pTsdb, true, false, false); rocksdb_multi_get(pTsdb->rCache.db, pTsdb->rCache.readoptions, num_keys * 2, (const char *const *)keys_list, keys_list_sizes, values_list, values_list_sizes, errs); + taosThreadMutexUnlock(&pTsdb->rCache.rMutex); + for (int i = 0; i < num_keys * 2; ++i) { if (errs[i]) { rocksdb_free(errs[i]); @@ -975,19 +1127,42 @@ int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKE 
rocksdb_writebatch_t *wb = pTsdb->rCache.writebatch; for (int i = 0; i < num_keys; ++i) { SLastCol *pLastCol = tsdbCacheDeserialize(values_list[i]); + taosThreadMutexLock(&pTsdb->rCache.rMutex); if (NULL != pLastCol && (pLastCol->ts <= eKey && pLastCol->ts >= sKey)) { rocksdb_writebatch_delete(wb, keys_list[i], klen); } - taosLRUCacheErase(pTsdb->lruCache, keys_list[i], klen); - pLastCol = tsdbCacheDeserialize(values_list[i + num_keys]); if (NULL != pLastCol && (pLastCol->ts <= eKey && pLastCol->ts >= sKey)) { rocksdb_writebatch_delete(wb, keys_list[num_keys + i], klen); } - taosLRUCacheErase(pTsdb->lruCache, keys_list[num_keys + i], klen); + taosThreadMutexUnlock(&pTsdb->rCache.rMutex); rocksdb_free(values_list[i]); rocksdb_free(values_list[i + num_keys]); + + taosThreadMutexLock(&pTsdb->lruMutex); + + LRUHandle *h = taosLRUCacheLookup(pTsdb->lruCache, keys_list[i], klen); + if (h) { + SLastCol *pLastCol = (SLastCol *)taosLRUCacheValue(pTsdb->lruCache, h); + if (pLastCol->dirty) { + pLastCol->dirty = 0; + } + taosLRUCacheRelease(pTsdb->lruCache, h, true); + } + taosLRUCacheErase(pTsdb->lruCache, keys_list[i], klen); + + h = taosLRUCacheLookup(pTsdb->lruCache, keys_list[num_keys + i], klen); + if (h) { + SLastCol *pLastCol = (SLastCol *)taosLRUCacheValue(pTsdb->lruCache, h); + if (pLastCol->dirty) { + pLastCol->dirty = 0; + } + taosLRUCacheRelease(pTsdb->lruCache, h, true); + } + taosLRUCacheErase(pTsdb->lruCache, keys_list[num_keys + i], klen); + + taosThreadMutexUnlock(&pTsdb->lruMutex); } for (int i = 0; i < num_keys; ++i) { taosMemoryFree(keys_list[i]); @@ -997,8 +1172,7 @@ int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKE taosMemoryFree(values_list); taosMemoryFree(values_list_sizes); - rocksMayWrite(pTsdb, true, false, false); - taosThreadMutexUnlock(&pTsdb->rCache.rMutex); + rocksMayWrite(pTsdb, true, false, true); _exit: taosMemoryFree(pTSchema); @@ -1011,7 +1185,7 @@ int32_t tsdbOpenCache(STsdb *pTsdb) { SLRUCache *pCache = NULL; size_t cfgCapacity = pTsdb->pVnode->config.cacheLastSize * 1024 * 1024; - pCache = taosLRUCacheInit(cfgCapacity, 1, .5); + pCache = taosLRUCacheInit(cfgCapacity, 0, .5); if (pCache == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; goto _err; @@ -1033,6 +1207,9 @@ int32_t tsdbOpenCache(STsdb *pTsdb) { taosThreadMutexInit(&pTsdb->lruMutex, NULL); + pTsdb->flushState.pTsdb = pTsdb; + pTsdb->flushState.flush_count = 0; + _err: pTsdb->lruCache = pCache; return code; @@ -1062,7 +1239,8 @@ static void getTableCacheKey(tb_uid_t uid, int cacheType, char *key, int *len) { *len = sizeof(uint64_t); } -static void deleteTableCacheLast(const void *key, size_t keyLen, void *value) { +static void deleteTableCacheLast(const void *key, size_t keyLen, void *value, void *ud) { + (void)ud; SArray *pLastArray = (SArray *)value; int16_t nCol = taosArrayGetSize(pLastArray); for (int16_t iCol = 0; iCol < nCol; ++iCol) { @@ -3146,7 +3324,8 @@ int32_t tsdbCacheGetLastrowH(SLRUCache *pCache, tb_uid_t uid, SCacheRowsReader * size_t charge = pArray->capacity * pArray->elemSize + sizeof(*pArray); _taos_lru_deleter_t deleter = deleteTableCacheLast; - LRUStatus status = taosLRUCacheInsert(pCache, key, keyLen, pArray, charge, deleter, &h, TAOS_LRU_PRIORITY_LOW); + LRUStatus status = + taosLRUCacheInsert(pCache, key, keyLen, pArray, charge, deleter, &h, TAOS_LRU_PRIORITY_LOW, NULL); if (status != TAOS_LRU_STATUS_OK) { code = -1; } @@ -3186,7 +3365,7 @@ int32_t tsdbCacheGetLastH(SLRUCache *pCache, tb_uid_t uid, SCacheRowsReader *pr, size_t charge = 
pLastArray->capacity * pLastArray->elemSize + sizeof(*pLastArray); _taos_lru_deleter_t deleter = deleteTableCacheLast; LRUStatus status = - taosLRUCacheInsert(pCache, key, keyLen, pLastArray, charge, deleter, &h, TAOS_LRU_PRIORITY_LOW); + taosLRUCacheInsert(pCache, key, keyLen, pLastArray, charge, deleter, &h, TAOS_LRU_PRIORITY_LOW, NULL); if (status != TAOS_LRU_STATUS_OK) { code = -1; } @@ -3259,7 +3438,8 @@ static int32_t tsdbCacheLoadBlockIdx(SDataFReader *pFileReader, SArray **aBlockI return code; } -static void deleteBICache(const void *key, size_t keyLen, void *value) { +static void deleteBICache(const void *key, size_t keyLen, void *value, void *ud) { + (void)ud; SArray *pArray = (SArray *)value; taosArrayDestroy(pArray); @@ -3290,7 +3470,8 @@ int32_t tsdbCacheGetBlockIdx(SLRUCache *pCache, SDataFReader *pFileReader, LRUHa size_t charge = pArray->capacity * pArray->elemSize + sizeof(*pArray); _taos_lru_deleter_t deleter = deleteBICache; - LRUStatus status = taosLRUCacheInsert(pCache, key, keyLen, pArray, charge, deleter, &h, TAOS_LRU_PRIORITY_LOW); + LRUStatus status = + taosLRUCacheInsert(pCache, key, keyLen, pArray, charge, deleter, &h, TAOS_LRU_PRIORITY_LOW, NULL); if (status != TAOS_LRU_STATUS_OK) { code = -1; } diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index 2500015ec1d87d05667b63ca305eac6a16bfe605..84dcde06ac348a68b107eec97a79f48d3b8cbde0 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -2846,18 +2846,18 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader) { setBlockAllDumped(pDumpInfo, pBlock->maxKey.ts, pReader->order); return code; } - + pBlockScanInfo = getTableBlockScanInfo(pReader->status.pTableMap, pBlockInfo->uid, pReader->idStr); if (pBlockScanInfo == NULL) { goto _end; } - TSDBKEY keyInBuf = getCurrentKeyInBuf(pBlockScanInfo, pReader); + TSDBKEY keyInBuf = getCurrentKeyInBuf(pBlockScanInfo, pReader); // it is a clean block, load it directly if (isCleanFileDataBlock(pReader, pBlockInfo, pBlock, pBlockScanInfo, keyInBuf, pLastBlockReader) && pBlock->nRow <= pReader->resBlockInfo.capacity) { - if (asc || (!hasDataInLastBlock(pLastBlockReader))) { + if (asc || (!hasDataInLastBlock(pLastBlockReader) && (pBlock->maxKey.ts > keyInBuf.ts))) { code = copyBlockDataToSDataBlock(pReader); if (code) { goto _end; diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index dc842310293d58b9cda25944f40ee52184ed43d3..57317643a87b72c8a682171b12fe7fe1ffcae936 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -107,6 +107,10 @@ static int32_t vnodePreProcessCreateTableMsg(SVnode *pVnode, SRpcMsg *pMsg) { _exit: tDecoderClear(&dc); + if (code) { + vError("vgId:%d, %s:%d failed to preprocess submit request since %s, msg type:%s", TD_VID(pVnode), __func__, lino, + tstrerror(code), TMSG_INFO(pMsg->msgType)); + } return code; } @@ -272,11 +276,11 @@ static int32_t vnodePreProcessSubmitMsg(SVnode *pVnode, SRpcMsg *pMsg) { tEndDecode(pCoder); _exit: + tDecoderClear(pCoder); if (code) { - vError("vgId:%d, failed to preprocess submit request since %s, msg type:%d", TD_VID(pVnode), tstrerror(code), - pMsg->msgType); + vError("vgId:%d, %s:%d failed to preprocess submit request since %s, msg type:%s", TD_VID(pVnode), __func__, lino, + tstrerror(code), TMSG_INFO(pMsg->msgType)); } - tDecoderClear(pCoder); return code; } @@ -367,8 +371,8 @@ int32_t vnodePreProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg) { 
_exit: if (code) { - vError("vgId:%d, failed to preprocess write request since %s, msg type:%d", TD_VID(pVnode), tstrerror(code), - pMsg->msgType); + vError("vgId:%d, failed to preprocess write request since %s, msg type:%s", TD_VID(pVnode), tstrerror(code), + TMSG_INFO(pMsg->msgType)); } return code; } diff --git a/source/libs/executor/inc/tsort.h b/source/libs/executor/inc/tsort.h index e0718a0c0a39473cfa020dcbf2f7801061ab8ed9..78c56c0405f6010efc370be8088a367ac12e0f42 100644 --- a/source/libs/executor/inc/tsort.h +++ b/source/libs/executor/inc/tsort.h @@ -170,6 +170,10 @@ SSortExecInfo tsortGetSortExecInfo(SSortHandle* pHandle); */ int32_t getProperSortPageSize(size_t rowSize, uint32_t numOfCols); + +bool tsortIsClosed(SSortHandle* pHandle); +void tsortSetClosed(SSortHandle* pHandle); + #ifdef __cplusplus } #endif diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 331a2fa7ab23023348c66922fd5edf8fc00f94d6..09280295571ac711d8b4cfaaac587e25c2d0733e 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -26,8 +26,8 @@ #include "executil.h" #include "executorInt.h" #include "querytask.h" -#include "tcompression.h" #include "storageapi.h" +#include "tcompression.h" typedef struct tagFilterAssist { SHashObj* colHash; @@ -42,13 +42,13 @@ typedef enum { } FilterCondType; static FilterCondType checkTagCond(SNode* cond); -static int32_t optimizeTbnameInCond(void* metaHandle, int64_t suid, SArray* list, SNode* pTagCond, SStorageAPI* pAPI); -static int32_t optimizeTbnameInCondImpl(void* metaHandle, SArray* list, SNode* pTagCond, SStorageAPI* pStoreAPI); +static int32_t optimizeTbnameInCond(void* metaHandle, int64_t suid, SArray* list, SNode* pTagCond, SStorageAPI* pAPI); +static int32_t optimizeTbnameInCondImpl(void* metaHandle, SArray* list, SNode* pTagCond, SStorageAPI* pStoreAPI); -static int32_t getTableList(void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, - SNode* pTagIndexCond, STableListInfo* pListInfo, uint8_t* digest, const char* idstr, SStorageAPI* pStorageAPI); -static SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList, - void* pVnode, SStorageAPI* pStorageAPI); +static int32_t getTableList(void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, SNode* pTagIndexCond, + STableListInfo* pListInfo, uint8_t* digest, const char* idstr, SStorageAPI* pStorageAPI); +static SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList, void* pVnode, + SStorageAPI* pStorageAPI); static int64_t getLimit(const SNode* pLimit) { return NULL == pLimit ? -1 : ((SLimitNode*)pLimit)->limit; } static int64_t getOffset(const SNode* pLimit) { return NULL == pLimit ? 
-1 : ((SLimitNode*)pLimit)->offset; } @@ -302,7 +302,7 @@ EDealRes doTranslateTagExpr(SNode** pNode, void* pContext) { return DEAL_RES_CONTINUE; } -int32_t isQualifiedTable(STableKeyInfo* info, SNode* pTagCond, void* metaHandle, bool* pQualified, SStorageAPI *pAPI) { +int32_t isQualifiedTable(STableKeyInfo* info, SNode* pTagCond, void* metaHandle, bool* pQualified, SStorageAPI* pAPI) { int32_t code = TSDB_CODE_SUCCESS; SMetaReader mr = {0}; @@ -495,7 +495,8 @@ int32_t getColInfoResultForGroupby(void* pVnode, SNodeList* group, STableListInf genTbGroupDigest((SNode*)listNode, digest, &context); nodesFree(listNode); - pAPI->metaFn.metaGetCachedTbGroup(pVnode, pTableListInfo->idInfo.suid, context.digest, tListLen(context.digest), &tableList); + pAPI->metaFn.metaGetCachedTbGroup(pVnode, pTableListInfo->idInfo.suid, context.digest, tListLen(context.digest), + &tableList); if (tableList) { taosArrayDestroy(pTableListInfo->pTableList); pTableListInfo->pTableList = tableList; @@ -632,7 +633,8 @@ int32_t getColInfoResultForGroupby(void* pVnode, SNodeList* group, STableListInf if (tsTagFilterCache) { tableList = taosArrayDup(pTableListInfo->pTableList, NULL); - pAPI->metaFn.metaPutTbGroupToCache(pVnode, pTableListInfo->idInfo.suid, context.digest, tListLen(context.digest), tableList, taosArrayGetSize(tableList) * sizeof(STableKeyInfo)); + pAPI->metaFn.metaPutTbGroupToCache(pVnode, pTableListInfo->idInfo.suid, context.digest, tListLen(context.digest), + tableList, taosArrayGetSize(tableList) * sizeof(STableKeyInfo)); } // int64_t st2 = taosGetTimestampUs(); @@ -776,7 +778,8 @@ static int32_t optimizeTbnameInCond(void* pVnode, int64_t suid, SArray* list, SN } // only return uid that does not contained in pExistedUidList -static int32_t optimizeTbnameInCondImpl(void* pVnode, SArray* pExistedUidList, SNode* pTagCond, SStorageAPI* pStoreAPI) { +static int32_t optimizeTbnameInCondImpl(void* pVnode, SArray* pExistedUidList, SNode* pTagCond, + SStorageAPI* pStoreAPI) { if (nodeType(pTagCond) != QUERY_NODE_OPERATOR) { return -1; } @@ -839,8 +842,8 @@ static int32_t optimizeTbnameInCondImpl(void* pVnode, SArray* pExistedUidList, S return -1; } -static SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList, - void* pVnode, SStorageAPI* pStorageAPI) { +static SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList, void* pVnode, + SStorageAPI* pStorageAPI) { SSDataBlock* pResBlock = createDataBlock(); if (pResBlock == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -1080,8 +1083,8 @@ int32_t getTableList(void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, S genTagFilterDigest(pTagCond, &context); bool acquired = false; - pStorageAPI->metaFn.getCachedTableList(pVnode, pScanNode->suid, context.digest, tListLen(context.digest), pUidList, - &acquired); + pStorageAPI->metaFn.getCachedTableList(pVnode, pScanNode->suid, context.digest, tListLen(context.digest), + pUidList, &acquired); if (acquired) { digest[0] = 1; memcpy(digest + 1, context.digest, tListLen(context.digest)); @@ -1097,13 +1100,15 @@ int32_t getTableList(void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, S if (pTagIndexCond) { void* pIndex = pStorageAPI->metaFn.getInvertIndex(pVnode); - SIndexMetaArg metaArg = { - .metaEx = pVnode, .idx = pStorageAPI->metaFn.storeGetIndexInfo(pVnode), .ivtIdx = pIndex, .suid = pScanNode->uid}; + SIndexMetaArg metaArg = {.metaEx = pVnode, + .idx = pStorageAPI->metaFn.storeGetIndexInfo(pVnode), + .ivtIdx = pIndex, + .suid = 
pScanNode->uid}; status = SFLT_NOT_INDEX; code = doFilterTag(pTagIndexCond, &metaArg, pUidList, &status, &pStorageAPI->metaFilter); if (code != 0 || status == SFLT_NOT_INDEX) { // temporarily disable it for performance sake - qWarn("failed to get tableIds from index, suid:%" PRIu64, pScanNode->uid); + qDebug("failed to get tableIds from index, suid:%" PRIu64, pScanNode->uid); code = TSDB_CODE_SUCCESS; } else { qInfo("succ to get filter result, table num: %d", (int)taosArrayGetSize(pUidList)); @@ -1128,7 +1133,7 @@ int32_t getTableList(void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, S memcpy(pPayload + sizeof(int32_t), taosArrayGet(pUidList, 0), numOfTables * sizeof(uint64_t)); } -// metaUidFilterCachePut(metaHandle, pScanNode->suid, context.digest, tListLen(context.digest), pPayload, size, 1); + pStorageAPI->metaFn.putCachedTableList(pVnode, pScanNode->suid, context.digest, tListLen(context.digest), pPayload, size, 1); digest[0] = 1; memcpy(digest + 1, context.digest, tListLen(context.digest)); } @@ -1152,15 +1157,17 @@ _end: return code; } -int32_t qGetTableList(int64_t suid, void* pVnode, void* node, SArray **tableList, void* pTaskInfo){ - SSubplan *pSubplan = (SSubplan *)node; +int32_t qGetTableList(int64_t suid, void* pVnode, void* node, SArray** tableList, void* pTaskInfo) { + SSubplan* pSubplan = (SSubplan*)node; SScanPhysiNode pNode = {0}; pNode.suid = suid; pNode.uid = suid; pNode.tableType = TSDB_SUPER_TABLE; STableListInfo* pTableListInfo = tableListCreate(); - uint8_t digest[17] = {0}; - int code = getTableList(pVnode, &pNode, pSubplan ? pSubplan->pTagCond : NULL, pSubplan ? pSubplan->pTagIndexCond : NULL, pTableListInfo, digest, "qGetTableList", &((SExecTaskInfo*)pTaskInfo)->storageAPI); + uint8_t digest[17] = {0}; + int code = + getTableList(pVnode, &pNode, pSubplan ? pSubplan->pTagCond : NULL, pSubplan ? 
pSubplan->pTagIndexCond : NULL, + pTableListInfo, digest, "qGetTableList", &((SExecTaskInfo*)pTaskInfo)->storageAPI); *tableList = pTableListInfo->pTableList; pTableListInfo->pTableList = NULL; tableListDestroy(pTableListInfo); @@ -1181,7 +1188,7 @@ size_t getTableTagsBufLen(const SNodeList* pGroups) { } int32_t getGroupIdFromTagsVal(void* pVnode, uint64_t uid, SNodeList* pGroupNode, char* keyBuf, uint64_t* pGroupId, - SStorageAPI* pAPI) { + SStorageAPI* pAPI) { SMetaReader mr = {0}; pAPI->metaReaderFn.initReader(&mr, pVnode, 0, &pAPI->metaFn); @@ -1560,7 +1567,8 @@ static int32_t setSelectValueColumnInfo(SqlFunctionCtx* pCtx, int32_t numOfOutpu return TSDB_CODE_SUCCESS; } -SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput, int32_t** rowEntryInfoOffset, SFunctionStateStore* pStore) { +SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput, int32_t** rowEntryInfoOffset, + SFunctionStateStore* pStore) { SqlFunctionCtx* pFuncCtx = (SqlFunctionCtx*)taosMemoryCalloc(numOfOutput, sizeof(SqlFunctionCtx)); if (pFuncCtx == NULL) { return NULL; @@ -1849,7 +1857,7 @@ void getNextTimeWindow(const SInterval* pInterval, STimeWindow* tw, int32_t orde } struct tm tm; - time_t t = (time_t) key; + time_t t = (time_t)key; taosLocalTime(&t, &tm, NULL); int mon = (int)(tm.tm_year * 12 + tm.tm_mon + duration * factor); @@ -2079,8 +2087,8 @@ static int32_t sortTableGroup(STableListInfo* pTableListInfo) { return TSDB_CODE_SUCCESS; } -int32_t buildGroupIdMapForAllTables(STableListInfo* pTableListInfo, SReadHandle* pHandle, SScanPhysiNode* pScanNode, SNodeList* group, - bool groupSort, uint8_t *digest, SStorageAPI* pAPI) { +int32_t buildGroupIdMapForAllTables(STableListInfo* pTableListInfo, SReadHandle* pHandle, SScanPhysiNode* pScanNode, + SNodeList* group, bool groupSort, uint8_t* digest, SStorageAPI* pAPI) { int32_t code = TSDB_CODE_SUCCESS; bool groupByTbname = groupbyTbname(group); @@ -2132,7 +2140,8 @@ int32_t createScanTableListInfo(SScanPhysiNode* pScanNode, SNodeList* pGroupTags } uint8_t digest[17] = {0}; - int32_t code = getTableList(pHandle->vnode, pScanNode, pTagCond, pTagIndexCond, pTableListInfo, digest, idStr, &pTaskInfo->storageAPI); + int32_t code = getTableList(pHandle->vnode, pScanNode, pTagCond, pTagIndexCond, pTableListInfo, digest, idStr, + &pTaskInfo->storageAPI); if (code != TSDB_CODE_SUCCESS) { qError("failed to getTableList, code: %s", tstrerror(code)); return code; @@ -2150,7 +2159,8 @@ int32_t createScanTableListInfo(SScanPhysiNode* pScanNode, SNodeList* pGroupTags return TSDB_CODE_SUCCESS; } - code = buildGroupIdMapForAllTables(pTableListInfo, pHandle, pScanNode, pGroupTags, groupSort, digest, &pTaskInfo->storageAPI); + code = buildGroupIdMapForAllTables(pTableListInfo, pHandle, pScanNode, pGroupTags, groupSort, digest, + &pTaskInfo->storageAPI); if (code != TSDB_CODE_SUCCESS) { return code; } diff --git a/source/libs/executor/src/filloperator.c b/source/libs/executor/src/filloperator.c index 1106c6e29fb093bdc145f41e91957ffc5cc9e9b2..92152924f9dc196149663069643d588c871918ec 100644 --- a/source/libs/executor/src/filloperator.c +++ b/source/libs/executor/src/filloperator.c @@ -185,6 +185,8 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) { // the scan order may be different from the output result order for agg interval operator. 
if (pDownstream->operatorType == QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL) { order = ((SIntervalAggOperatorInfo*) pDownstream->info)->resultTsOrder; + } else { + order = pInfo->pFillInfo->order; } #endif diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 2f108c83b0b8e1adfa07c69b72440f6e834c404e..008a3697fcc508810af0c30ece6f0c61c408c4ba 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -55,7 +55,6 @@ typedef struct STableMergeScanSortSourceParam { int32_t readerIdx; uint64_t uid; SSDataBlock* inputBlock; - bool multiReader; STsdbReader* dataReader; } STableMergeScanSortSourceParam; @@ -466,7 +465,12 @@ static STableCachedVal* createTableCacheVal(const SMetaReader* pMetaReader) { } // const void *key, size_t keyLen, void *value -static void freeCachedMetaItem(const void* key, size_t keyLen, void* value) { freeTableCachedVal(value); } +static void freeCachedMetaItem(const void* key, size_t keyLen, void* value, void* ud) { + (void)key; + (void)keyLen; + (void)ud; + freeTableCachedVal(value); +} static void doSetNullValue(SSDataBlock* pBlock, const SExprInfo* pExpr, int32_t numOfExpr) { for (int32_t j = 0; j < numOfExpr; ++j) { @@ -501,7 +505,7 @@ int32_t addTagPseudoColumnData(SReadHandle* pHandle, const SExprInfo* pExpr, int // 1. check if it is existed in meta cache if (pCache == NULL) { - pHandle->api.metaReaderFn.initReader(&mr, pHandle->vnode, 0, &pHandle->api.metaFn); + pHandle->api.metaReaderFn.initReader(&mr, pHandle->vnode, META_READER_NOLOCK, &pHandle->api.metaFn); code = pHandle->api.metaReaderFn.getEntryGetUidCache(&mr, pBlock->info.id.uid); if (code != TSDB_CODE_SUCCESS) { // when encounter the TSDB_CODE_PAR_TABLE_NOT_EXIST error, we proceed. 
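
Note on the cache-callback change that recurs throughout this patch: the LRU deleters touched here (freeTbGroupCachePayload, tsdbCacheDeleter, freeCachedMetaItem, deleteBICache, deleteTableCacheLast) gain a trailing `void *ud` argument, and `taosLRUCacheInsert()` takes the matching user-data pointer as its new last parameter (NULL at most call sites). In tsdbCache.c that pointer is `&pTsdb->flushState`, so a dirty last/last_row column evicted from the LRU can be handed to `tsdbCachePutBatch()` and queued into the RocksDB write batch instead of being dropped. The snippet below is a generic, self-contained illustration of that callback-plus-user-data pattern only; none of its names are TDengine APIs.

```c
/* Generic sketch of an eviction callback that receives caller context through
 * a user-data pointer, mirroring the extra `void *ud` argument this patch
 * threads through taosLRUCacheInsert() and the deleters. Hypothetical names. */
#include <stdio.h>
#include <stdlib.h>

typedef void (*deleter_fn)(const void *key, size_t klen, void *value, void *ud);

typedef struct {
  int flush_count; /* stands in for SCacheFlushState */
} flush_state_t;

static void flush_on_evict(const void *key, size_t klen, void *value, void *ud) {
  (void)key;
  (void)klen;
  flush_state_t *st = ud; /* context arrives via ud, not a global */
  st->flush_count++;      /* e.g. queue the dirty value into a write batch */
  free(value);
}

int main(void) {
  flush_state_t st = {0};
  void *value = malloc(16);
  deleter_fn del = flush_on_evict;

  del("last:uid:cid", 12, value, &st); /* simulate an LRU eviction */
  printf("flushed %d dirty entries\n", st.flush_count);
  return 0;
}
```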
@@ -554,7 +558,7 @@ int32_t addTagPseudoColumnData(SReadHandle* pHandle, const SExprInfo* pExpr, int freeReader = true; int32_t ret = taosLRUCacheInsert(pCache->pTableMetaEntryCache, &pBlock->info.id.uid, sizeof(uint64_t), pVal, - sizeof(STableCachedVal), freeCachedMetaItem, NULL, TAOS_LRU_PRIORITY_LOW); + sizeof(STableCachedVal), freeCachedMetaItem, NULL, TAOS_LRU_PRIORITY_LOW, NULL); if (ret != TAOS_LRU_STATUS_OK) { qError("failed to put meta into lru cache, code:%d, %s", ret, idStr); freeTableCachedVal(pVal); @@ -2654,8 +2658,7 @@ static SSDataBlock* getTableDataBlockImpl(void* param) { int64_t st = taosGetTimestampUs(); void* p = tableListGetInfo(pInfo->base.pTableListInfo, readIdx + pInfo->tableStartIndex); SReadHandle* pHandle = &pInfo->base.readHandle; - - if (NULL == source->dataReader || !source->multiReader) { + if (NULL == source->dataReader) { code = pAPI->tsdReader.tsdReaderOpen(pHandle->vnode, pQueryCond, p, 1, pBlock, (void**)&source->dataReader, GET_TASKID(pTaskInfo), false, NULL); if (code != 0) { T_LONG_JMP(pTaskInfo->env, code); @@ -2719,19 +2722,15 @@ static SSDataBlock* getTableDataBlockImpl(void* param) { pInfo->base.readRecorder.elapsedTime += (taosGetTimestampUs() - st) / 1000.0; qTrace("tsdb/read-table-data: %p, close reader", reader); - if (!source->multiReader) { - pAPI->tsdReader.tsdReaderClose(pInfo->base.dataReader); - source->dataReader = NULL; - } pInfo->base.dataReader = NULL; return pBlock; } - if (!source->multiReader) { - pAPI->tsdReader.tsdReaderClose(pInfo->base.dataReader); - source->dataReader = NULL; - } + pAPI->tsdReader.tsdReaderClose(source->dataReader); + source->dataReader = NULL; pInfo->base.dataReader = NULL; + blockDataDestroy(source->inputBlock); + source->inputBlock = NULL; return NULL; } @@ -2787,7 +2786,19 @@ int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) { // todo the total available buffer should be determined by total capacity of buffer of this task. // the additional one is reserved for merge result - pInfo->sortBufSize = pInfo->bufPageSize * (tableEndIdx - tableStartIdx + 1 + 1); + // pInfo->sortBufSize = pInfo->bufPageSize * (tableEndIdx - tableStartIdx + 1 + 1); + int32_t kWay = (TSDB_MAX_BYTES_PER_ROW * 2) / (pInfo->pResBlock->info.rowSize); + if (kWay >= 128) { + kWay = 128; + } else if (kWay <= 2) { + kWay = 2; + } else { + int i = 2; + while (i * 2 <= kWay) i = i * 2; + kWay = i; + } + + pInfo->sortBufSize = pInfo->bufPageSize * (kWay + 1); int32_t numOfBufPage = pInfo->sortBufSize / pInfo->bufPageSize; pInfo->pSortHandle = tsortCreateSortHandle(pInfo->pSortInfo, SORT_MULTISOURCE_MERGE, pInfo->bufPageSize, numOfBufPage, pInfo->pSortInputBlock, pTaskInfo->id.str); @@ -2802,9 +2813,7 @@ int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) { STableMergeScanSortSourceParam param = {0}; param.readerIdx = i; param.pOperator = pOperator; - param.multiReader = (numOfTable <= MULTI_READER_MAX_TABLE_NUM) ? 
true : false; param.inputBlock = createOneDataBlock(pInfo->pResBlock, false); - blockDataEnsureCapacity(param.inputBlock, pOperator->resultInfo.capacity); taosArrayPush(pInfo->sortSourceParams, ¶m); @@ -2887,6 +2896,11 @@ SSDataBlock* getSortedTableMergeScanBlockData(SSortHandle* pHandle, SSDataBlock* } } + if (tsortIsClosed(pHandle)) { + terrno = TSDB_CODE_TSC_QUERY_CANCELLED; + T_LONG_JMP(pOperator->pTaskInfo->env, terrno); + } + bool limitReached = applyLimitOffset(&pInfo->limitInfo, pResBlock, pTaskInfo); qDebug("%s get sorted row block, rows:%" PRId64 ", limit:%" PRId64, GET_TASKID(pTaskInfo), pResBlock->info.rows, pInfo->limitInfo.numOfOutputRows); diff --git a/source/libs/executor/src/sortoperator.c b/source/libs/executor/src/sortoperator.c index 8f287946f06e653494389c47cb63d4581296f1ef..585c2e8c541461347475d302cd7a305e18cea336 100644 --- a/source/libs/executor/src/sortoperator.c +++ b/source/libs/executor/src/sortoperator.c @@ -233,6 +233,11 @@ SSDataBlock* doSort(SOperatorInfo* pOperator) { // multi-group case not handle here SSDataBlock* pBlock = NULL; while (1) { + if (tsortIsClosed(pInfo->pSortHandle)) { + terrno = TSDB_CODE_TSC_QUERY_CANCELLED; + T_LONG_JMP(pOperator->pTaskInfo->env, terrno); + } + pBlock = getSortedBlockData(pInfo->pSortHandle, pInfo->binfo.pRes, pOperator->resultInfo.capacity, pInfo->matchInfo.pList, pInfo); if (pBlock == NULL) { @@ -445,6 +450,11 @@ SSDataBlock* doGroupSort(SOperatorInfo* pOperator) { SSDataBlock* pBlock = NULL; while (pInfo->pCurrSortHandle != NULL) { + if (tsortIsClosed(pInfo->pCurrSortHandle)) { + terrno = TSDB_CODE_TSC_QUERY_CANCELLED; + T_LONG_JMP(pOperator->pTaskInfo->env, terrno); + } + // beginSortGroup would fetch all child blocks of pInfo->currGroupId; ASSERT(pInfo->childOpStatus != CHILD_OP_SAME_GROUP); pBlock = getGroupSortedBlockData(pInfo->pCurrSortHandle, pInfo->binfo.pRes, pOperator->resultInfo.capacity, diff --git a/source/libs/executor/src/sysscanoperator.c b/source/libs/executor/src/sysscanoperator.c index 06138d7d5c5bd8d438451b07198f85c793e02f8b..70fe42595e63f1135df3dbf6cab7a27163f0a6c5 100644 --- a/source/libs/executor/src/sysscanoperator.c +++ b/source/libs/executor/src/sysscanoperator.c @@ -1601,6 +1601,8 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) { SSysTableScanInfo* pInfo = pOperator->info; char dbName[TSDB_DB_NAME_LEN] = {0}; + blockDataCleanup(pInfo->pRes); + const char* name = tNameGetTableName(&pInfo->name); if (pInfo->showRewrite) { getDBNameFromCondition(pInfo->pCondition, dbName); diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index f29ea5057e06619a76f0b2ea79091a1eca42d2a6..78d1e97554eb65fd7b2d26afef8d454ca05ec9fc 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -103,9 +103,8 @@ static int32_t setTimeWindowOutputBuf(SResultRowInfo* pResultRowInfo, STimeWindo return TSDB_CODE_SUCCESS; } -static void updateTimeWindowInfo(SColumnInfoData* pColData, STimeWindow* pWin, bool includeEndpoint) { +static void updateTimeWindowInfo(SColumnInfoData* pColData, STimeWindow* pWin, int64_t delta) { int64_t* ts = (int64_t*)pColData->pData; - int32_t delta = includeEndpoint ? 
1 : 0; int64_t duration = pWin->ekey - pWin->skey + delta; ts[2] = duration; // set the duration @@ -642,7 +641,7 @@ static void doInterpUnclosedTimeWindow(SOperatorInfo* pOperatorInfo, int32_t num setResultRowInterpo(pResult, RESULT_ROW_END_INTERP); setNotInterpoWindowKey(pSup->pCtx, numOfExprs, RESULT_ROW_START_INTERP); - updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &w, true); + updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &w, 1); applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, 0, pBlock->info.rows, numOfExprs); @@ -917,7 +916,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul doWindowBorderInterpolation(pInfo, pBlock, pResult, &win, startPos, forwardRows, pSup); } - updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &win, true); + updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &win, 1); applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, pBlock->info.rows, numOfOutput); @@ -952,7 +951,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul addToOpenWindowList(pResultRowInfo, pResult, tableGroupId); } #endif - updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &nextWin, true); + updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &nextWin, 1); applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, pBlock->info.rows, numOfOutput); doCloseWindow(pResultRowInfo, pInfo, pResult); @@ -1119,7 +1118,7 @@ static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorI T_LONG_JMP(pTaskInfo->env, TSDB_CODE_APP_ERROR); } - updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &window, false); + updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &window, 0); applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, pRowSup->startRowIndex, pRowSup->numOfRows, pBlock->info.rows, numOfOutput); @@ -1144,7 +1143,7 @@ static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorI T_LONG_JMP(pTaskInfo->env, TSDB_CODE_APP_ERROR); } - updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pRowSup->win, false); + updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pRowSup->win, 0); applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, pRowSup->startRowIndex, pRowSup->numOfRows, pBlock->info.rows, numOfOutput); } @@ -1751,7 +1750,7 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSessionAggOperator } // pInfo->numOfRows data belong to the current session window - updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &window, false); + updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &window, 0); applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, pRowSup->startRowIndex, pRowSup->numOfRows, pBlock->info.rows, numOfOutput); @@ -1769,7 +1768,7 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSessionAggOperator T_LONG_JMP(pTaskInfo->env, TSDB_CODE_APP_ERROR); } - updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pRowSup->win, false); + updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pRowSup->win, 0); applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, pRowSup->startRowIndex, pRowSup->numOfRows, pBlock->info.rows, numOfOutput); } @@ -2421,7 +2420,7 @@ static void doStreamIntervalAggImpl(SOperatorInfo* pOperatorInfo, SSDataBlock* p 
tSimpleHashPut(pInfo->aggSup.pResultRowHashTable, &key, sizeof(SWinKey), &pResPos, POINTER_BYTES); } - updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &nextWin, true); + updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &nextWin, 1); applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, pSDataBlock->info.rows, numOfOutput); key.ts = nextWin.skey; @@ -3093,14 +3092,14 @@ static int32_t initSessionOutputBuf(SResultWindowInfo* pWinInfo, SResultRow** pR static int32_t doOneWindowAggImpl(SColumnInfoData* pTimeWindowData, SResultWindowInfo* pCurWin, SResultRow** pResult, int32_t startIndex, int32_t winRows, int32_t rows, int32_t numOutput, - SOperatorInfo* pOperator) { + SOperatorInfo* pOperator, int64_t winDelta) { SExprSupp* pSup = &pOperator->exprSupp; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; int32_t code = initSessionOutputBuf(pCurWin, pResult, pSup->pCtx, numOutput, pSup->rowEntryInfoOffset); if (code != TSDB_CODE_SUCCESS || (*pResult) == NULL) { return TSDB_CODE_OUT_OF_MEMORY; } - updateTimeWindowInfo(pTimeWindowData, &pCurWin->sessionWin.win, false); + updateTimeWindowInfo(pTimeWindowData, &pCurWin->sessionWin.win, winDelta); applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, pTimeWindowData, startIndex, winRows, rows, numOutput); return TSDB_CODE_SUCCESS; } @@ -3160,7 +3159,11 @@ static void compactSessionWindow(SOperatorInfo* pOperator, SResultWindowInfo* pC SResultRow* pWinResult = NULL; initSessionOutputBuf(&winInfo, &pWinResult, pAggSup->pDummyCtx, numOfOutput, pSup->rowEntryInfoOffset); pCurWin->sessionWin.win.ekey = TMAX(pCurWin->sessionWin.win.ekey, winInfo.sessionWin.win.ekey); - updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pCurWin->sessionWin.win, true); + int64_t winDelta = 0; + if (IS_FINAL_OP(pInfo)) { + winDelta = pAggSup->gap; + } + updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pCurWin->sessionWin.win, winDelta); compactFunctions(pSup->pCtx, pAggSup->pDummyCtx, numOfOutput, pTaskInfo, &pInfo->twAggSup.timeWindowData); tSimpleHashRemove(pStUpdated, &winInfo.sessionWin, sizeof(SSessionKey)); if (winInfo.isOutput && pStDeleted) { @@ -3179,7 +3182,7 @@ int32_t saveSessionOutputBuf(SStreamAggSupporter* pAggSup, SResultWindowInfo* pW } static void doStreamSessionAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBlock, SSHashObj* pStUpdated, - SSHashObj* pStDeleted, bool hasEndTs) { + SSHashObj* pStDeleted, bool hasEndTs, bool addGap) { SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SStreamSessionAggOperatorInfo* pInfo = pOperator->info; int32_t numOfOutput = pOperator->exprSupp.numOfExprs; @@ -3217,8 +3220,12 @@ static void doStreamSessionAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSData T_LONG_JMP(pTaskInfo->env, TSDB_CODE_OUT_OF_MEMORY); } + int64_t winDelta = 0; + if (addGap) { + winDelta = pAggSup->gap; + } code = doOneWindowAggImpl(&pInfo->twAggSup.timeWindowData, &winInfo, &pResult, i, winRows, rows, numOfOutput, - pOperator); + pOperator, winDelta); if (code != TSDB_CODE_SUCCESS || pResult == NULL) { T_LONG_JMP(pTaskInfo->env, TSDB_CODE_OUT_OF_MEMORY); } @@ -3378,7 +3385,7 @@ static void rebuildSessionWindow(SOperatorInfo* pOperator, SArray* pWinArray, SS } } num++; - updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &parentWin.sessionWin.win, true); + updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &parentWin.sessionWin.win, pAggSup->gap); initSessionOutputBuf(&childWin, &pChResult, pChild->exprSupp.pCtx, numOfOutput, pChild->exprSupp.rowEntryInfoOffset); 
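
For context on the window-duration change used just above: `updateTimeWindowInfo()` now takes an explicit `int64_t delta` instead of the old `bool includeEndpoint`, and the duration written into the time-window data becomes `ekey - skey + delta`. Interval windows keep passing 1 (the old `true`), plain state/session aggregation passes 0, and the final stream-session path passes the session gap (`pAggSup->gap`). A worked example, with numbers chosen purely for illustration:

```c
/* Illustration only: duration as computed by the new updateTimeWindowInfo(). */
#include <stdint.h>
#include <stdio.h>

static int64_t window_duration(int64_t skey, int64_t ekey, int64_t delta) {
  return ekey - skey + delta; /* stored as ts[2] in the time-window data */
}

int main(void) {
  int64_t skey = 1000, ekey = 1999, gap = 500; /* hypothetical timestamps (ms) */
  printf("state/session     : %lld\n", (long long)window_duration(skey, ekey, 0));   /* 999  */
  printf("interval (old 1)  : %lld\n", (long long)window_duration(skey, ekey, 1));   /* 1000 */
  printf("final session+gap : %lld\n", (long long)window_duration(skey, ekey, gap)); /* 1499 */
  return 0;
}
```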
compactFunctions(pSup->pCtx, pChild->exprSupp.pCtx, numOfOutput, pTaskInfo, &pInfo->twAggSup.timeWindowData); @@ -3540,7 +3547,7 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) { } // the pDataBlock are always the same one, no need to call this again setInputDataBlock(pSup, pBlock, TSDB_ORDER_ASC, MAIN_SCAN, true); - doStreamSessionAggImpl(pOperator, pBlock, pInfo->pStUpdated, pInfo->pStDeleted, IS_FINAL_OP(pInfo)); + doStreamSessionAggImpl(pOperator, pBlock, pInfo->pStUpdated, pInfo->pStDeleted, IS_FINAL_OP(pInfo), true); if (IS_FINAL_OP(pInfo)) { int32_t chIndex = getChildIndex(pBlock); int32_t size = taosArrayGetSize(pInfo->pChildren); @@ -3555,7 +3562,7 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) { } SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, chIndex); setInputDataBlock(&pChildOp->exprSupp, pBlock, TSDB_ORDER_ASC, MAIN_SCAN, true); - doStreamSessionAggImpl(pChildOp, pBlock, NULL, NULL, true); + doStreamSessionAggImpl(pChildOp, pBlock, NULL, NULL, true, false); } pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.window.ekey); pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.watermark); @@ -3760,7 +3767,7 @@ static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) { } // the pDataBlock are always the same one, no need to call this again setInputDataBlock(pSup, pBlock, TSDB_ORDER_ASC, MAIN_SCAN, true); - doStreamSessionAggImpl(pOperator, pBlock, pInfo->pStUpdated, NULL, false); + doStreamSessionAggImpl(pOperator, pBlock, pInfo->pStUpdated, NULL, false, false); maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.window.ekey); } @@ -4010,7 +4017,7 @@ static void doStreamStateAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBl continue; } code = doOneWindowAggImpl(&pInfo->twAggSup.timeWindowData, &curWin.winInfo, &pResult, i, winRows, rows, numOfOutput, - pOperator); + pOperator, 0); if (code != TSDB_CODE_SUCCESS || pResult == NULL) { T_LONG_JMP(pTaskInfo->env, TSDB_CODE_OUT_OF_MEMORY); } @@ -4283,7 +4290,7 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR continue; } - updateTimeWindowInfo(&iaInfo->twAggSup.timeWindowData, &currWin, true); + updateTimeWindowInfo(&iaInfo->twAggSup.timeWindowData, &currWin, 1); applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, &iaInfo->twAggSup.timeWindowData, startPos, currPos - startPos, pBlock->info.rows, pSup->numOfExprs); @@ -4303,7 +4310,7 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR miaInfo->curTs = currWin.skey; } - updateTimeWindowInfo(&iaInfo->twAggSup.timeWindowData, &currWin, true); + updateTimeWindowInfo(&iaInfo->twAggSup.timeWindowData, &currWin, 1); applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, &iaInfo->twAggSup.timeWindowData, startPos, currPos - startPos, pBlock->info.rows, pSup->numOfExprs); } @@ -4624,7 +4631,7 @@ static void doMergeIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultRowInfo* // window start(end) key interpolation doWindowBorderInterpolation(iaInfo, pBlock, pResult, &nextWin, startPos, forwardRows, pExprSup); - updateTimeWindowInfo(&iaInfo->twAggSup.timeWindowData, &nextWin, true); + updateTimeWindowInfo(&iaInfo->twAggSup.timeWindowData, &nextWin, 1); applyAggFunctionOnPartialTuples(pTaskInfo, pExprSup->pCtx, &iaInfo->twAggSup.timeWindowData, startPos, forwardRows, pBlock->info.rows, numOfOutput); doCloseWindow(pResultRowInfo, iaInfo, pResult); diff --git a/source/libs/executor/src/tsort.c b/source/libs/executor/src/tsort.c index 
f26aa8a97cafbeeb6f80248bb84b6b01f4ad0be4..58b3428b5bae2a88cac2ae51e7cfc9f3f7590948 100644 --- a/source/libs/executor/src/tsort.c +++ b/source/libs/executor/src/tsort.c @@ -46,6 +46,7 @@ struct SSortHandle { SMsortComparParam cmpParam; int32_t numOfCompletedSources; bool opened; + int8_t closed; const char* idStr; bool inMemSort; bool needAdjust; @@ -152,7 +153,7 @@ void tsortDestroySortHandle(SSortHandle* pSortHandle) { tsortClose(pSortHandle); if (pSortHandle->pMergeTree != NULL) { - tMergeTreeDestroy(pSortHandle->pMergeTree); + tMergeTreeDestroy(&pSortHandle->pMergeTree); } destroyDiskbasedBuf(pSortHandle->pBuf); @@ -581,6 +582,11 @@ static int32_t doInternalMergeSort(SSortHandle* pHandle) { SArray* pPageIdList = taosArrayInit(4, sizeof(int32_t)); while (1) { + if (tsortIsClosed(pHandle)) { + code = terrno = TSDB_CODE_TSC_QUERY_CANCELLED; + return code; + } + SSDataBlock* pDataBlock = getSortedBlockDataInner(pHandle, &pHandle->cmpParam, numOfRows); if (pDataBlock == NULL) { break; @@ -609,7 +615,7 @@ static int32_t doInternalMergeSort(SSortHandle* pHandle) { } sortComparCleanup(&pHandle->cmpParam); - tMergeTreeDestroy(pHandle->pMergeTree); + tMergeTreeDestroy(&pHandle->pMergeTree); pHandle->numOfCompletedSources = 0; SSDataBlock* pBlock = createOneDataBlock(pHandle->pDataBlock, false); @@ -803,10 +809,19 @@ int32_t tsortOpen(SSortHandle* pHandle) { } int32_t tsortClose(SSortHandle* pHandle) { - // do nothing + atomic_val_compare_exchange_8(&pHandle->closed, 0, 1); + taosMsleep(10); return TSDB_CODE_SUCCESS; } +bool tsortIsClosed(SSortHandle* pHandle) { + return atomic_val_compare_exchange_8(&pHandle->closed, 1, 2); +} + +void tsortSetClosed(SSortHandle* pHandle) { + atomic_store_8(&pHandle->closed, 2); +} + int32_t tsortSetFetchRawDataFp(SSortHandle* pHandle, _sort_fetch_block_fn_t fetchFp, void (*fp)(SSDataBlock*, void*), void* param) { pHandle->fetchfp = fetchFp; @@ -826,6 +841,9 @@ int32_t tsortSetCompareGroupId(SSortHandle* pHandle, bool compareGroupId) { } STupleHandle* tsortNextTuple(SSortHandle* pHandle) { + if (tsortIsClosed(pHandle)) { + return NULL; + } if (pHandle->cmpParam.numOfSources == pHandle->numOfCompletedSources) { return NULL; } diff --git a/source/libs/function/inc/functionMgtInt.h b/source/libs/function/inc/functionMgtInt.h index 2a8f60d4d242da0819e72cc04b5455ab4db4167f..6d23f65cf376f4f0dcb8e772cef49cfa468ce68c 100644 --- a/source/libs/function/inc/functionMgtInt.h +++ b/source/libs/function/inc/functionMgtInt.h @@ -51,6 +51,7 @@ extern "C" { #define FUNC_MGT_CUMULATIVE_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(22) #define FUNC_MGT_INTERP_PC_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(23) #define FUNC_MGT_GEOMETRY_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(24) +#define FUNC_MGT_FORBID_SYSTABLE_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(25) #define FUNC_MGT_TEST_MASK(val, mask) (((val) & (mask)) != 0) diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index 657b02c205e1b28d2ab8a9b3618324c422c6f877..6eb2be34b3ca1654e25a83c6bf0ed9aefd11bf0d 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -2348,7 +2348,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "leastsquares", .type = FUNCTION_TYPE_LEASTSQUARES, - .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_FORBID_STREAM_FUNC, + .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC, .translateFunc = translateLeastSQR, .getEnvFunc = getLeastSQRFuncEnv, 
.initFunc = leastSQRFunctionSetup, @@ -2456,7 +2456,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "top", .type = FUNCTION_TYPE_TOP, - .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_ROWS_FUNC | FUNC_MGT_KEEP_ORDER_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_FILL_FUNC, + .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_ROWS_FUNC | FUNC_MGT_KEEP_ORDER_FUNC | + FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_FILL_FUNC, .translateFunc = translateTopBot, .getEnvFunc = getTopBotFuncEnv, .initFunc = topBotFunctionSetup, @@ -2471,7 +2472,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "bottom", .type = FUNCTION_TYPE_BOTTOM, - .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_ROWS_FUNC | FUNC_MGT_KEEP_ORDER_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_FILL_FUNC, + .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_ROWS_FUNC | FUNC_MGT_KEEP_ORDER_FUNC | + FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_FILL_FUNC, .translateFunc = translateTopBot, .getEnvFunc = getTopBotFuncEnv, .initFunc = topBotFunctionSetup, @@ -2528,7 +2530,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "elapsed", .type = FUNCTION_TYPE_ELAPSED, - .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_INTERVAL_INTERPO_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_SPECIAL_DATA_REQUIRED, + .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_INTERVAL_INTERPO_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | + FUNC_MGT_FORBID_SYSTABLE_FUNC | FUNC_MGT_SPECIAL_DATA_REQUIRED, .dataRequiredFunc = statisDataRequired, .translateFunc = translateElapsed, .getEnvFunc = getElapsedFuncEnv, @@ -2568,7 +2571,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "interp", .type = FUNCTION_TYPE_INTERP, .classification = FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_INTERVAL_INTERPO_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | - FUNC_MGT_FORBID_STREAM_FUNC|FUNC_MGT_KEEP_ORDER_FUNC, + FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC | FUNC_MGT_KEEP_ORDER_FUNC, .translateFunc = translateInterp, .getEnvFunc = getSelectivityFuncEnv, .initFunc = functionSetup, @@ -2580,7 +2583,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "derivative", .type = FUNCTION_TYPE_DERIVATIVE, .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | - FUNC_MGT_KEEP_ORDER_FUNC | FUNC_MGT_CUMULATIVE_FUNC | FUNC_MGT_FORBID_STREAM_FUNC, + FUNC_MGT_KEEP_ORDER_FUNC | FUNC_MGT_CUMULATIVE_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC, .translateFunc = translateDerivative, .getEnvFunc = getDerivativeFuncEnv, .initFunc = derivativeFuncSetup, @@ -2592,7 +2595,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "irate", .type = FUNCTION_TYPE_IRATE, - .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_FORBID_STREAM_FUNC, + .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | + FUNC_MGT_FORBID_SYSTABLE_FUNC, .translateFunc = translateIrate, .getEnvFunc = getIrateFuncEnv, .initFunc = irateFuncSetup, @@ -2603,7 +2607,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "last_row", .type = FUNCTION_TYPE_LAST_ROW, - .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_KEEP_ORDER_FUNC, 
+ .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | + FUNC_MGT_KEEP_ORDER_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC, .translateFunc = translateFirstLast, .dynDataRequiredFunc = lastDynDataReq, .getEnvFunc = getFirstLastFuncEnv, @@ -2618,7 +2623,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "_cache_last_row", .type = FUNCTION_TYPE_CACHE_LAST_ROW, - .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_FORBID_STREAM_FUNC, + .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | + FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC, .translateFunc = translateFirstLast, .getEnvFunc = getFirstLastFuncEnv, .initFunc = functionSetup, @@ -2628,7 +2634,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "_cache_last", .type = FUNCTION_TYPE_CACHE_LAST, - .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_FORBID_STREAM_FUNC, + .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC, .translateFunc = translateFirstLast, .getEnvFunc = getFirstLastFuncEnv, .initFunc = functionSetup, @@ -2638,7 +2644,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "_last_row_partial", .type = FUNCTION_TYPE_LAST_PARTIAL, - .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC, + .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | + FUNC_MGT_FORBID_SYSTABLE_FUNC, .translateFunc = translateFirstLastPartial, .dynDataRequiredFunc = lastDynDataReq, .getEnvFunc = getFirstLastFuncEnv, @@ -2649,7 +2656,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "_last_row_merge", .type = FUNCTION_TYPE_LAST_MERGE, - .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC, + .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | + FUNC_MGT_FORBID_SYSTABLE_FUNC, .translateFunc = translateFirstLastMerge, .getEnvFunc = getFirstLastFuncEnv, .initFunc = functionSetup, @@ -2659,7 +2667,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "first", .type = FUNCTION_TYPE_FIRST, - .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_KEEP_ORDER_FUNC, + .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | + FUNC_MGT_KEEP_ORDER_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC, .translateFunc = translateFirstLast, .dynDataRequiredFunc = firstDynDataReq, .getEnvFunc = getFirstLastFuncEnv, @@ -2674,7 +2683,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "_first_partial", .type = FUNCTION_TYPE_FIRST_PARTIAL, - .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC, + .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | + FUNC_MGT_FORBID_SYSTABLE_FUNC, .translateFunc = translateFirstLastPartial, .dynDataRequiredFunc = firstDynDataReq, .getEnvFunc = getFirstLastFuncEnv, @@ -2686,7 +2696,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "_first_merge", .type = FUNCTION_TYPE_FIRST_MERGE, - 
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC, + .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | + FUNC_MGT_FORBID_SYSTABLE_FUNC, .translateFunc = translateFirstLastMerge, .getEnvFunc = getFirstLastFuncEnv, .initFunc = functionSetup, @@ -2697,7 +2708,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "last", .type = FUNCTION_TYPE_LAST, - .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_KEEP_ORDER_FUNC, + .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | + FUNC_MGT_KEEP_ORDER_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC, .translateFunc = translateFirstLast, .dynDataRequiredFunc = lastDynDataReq, .getEnvFunc = getFirstLastFuncEnv, @@ -2712,7 +2724,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "_last_partial", .type = FUNCTION_TYPE_LAST_PARTIAL, - .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC, + .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | + FUNC_MGT_FORBID_SYSTABLE_FUNC, .translateFunc = translateFirstLastPartial, .dynDataRequiredFunc = lastDynDataReq, .getEnvFunc = getFirstLastFuncEnv, @@ -2724,7 +2737,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "_last_merge", .type = FUNCTION_TYPE_LAST_MERGE, - .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC, + .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | + FUNC_MGT_FORBID_SYSTABLE_FUNC, .translateFunc = translateFirstLastMerge, .getEnvFunc = getFirstLastFuncEnv, .initFunc = functionSetup, @@ -2735,7 +2749,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "twa", .type = FUNCTION_TYPE_TWA, - .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_INTERVAL_INTERPO_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC, + .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_INTERVAL_INTERPO_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | + FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC, .translateFunc = translateInNumOutDou, .dataRequiredFunc = statisDataRequired, .getEnvFunc = getTwaFuncEnv, @@ -2826,7 +2841,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "diff", .type = FUNCTION_TYPE_DIFF, .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | - FUNC_MGT_KEEP_ORDER_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_CUMULATIVE_FUNC, + FUNC_MGT_KEEP_ORDER_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_CUMULATIVE_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC, .translateFunc = translateDiff, .getEnvFunc = getDiffFuncEnv, .initFunc = diffFunctionSetup, @@ -2839,7 +2854,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "statecount", .type = FUNCTION_TYPE_STATE_COUNT, .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | - FUNC_MGT_FORBID_STREAM_FUNC, + FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC, .translateFunc = translateStateCount, .getEnvFunc = getStateFuncEnv, .initFunc = functionSetup, @@ -2851,7 +2866,7 @@ const SBuiltinFuncDefinition 
funcMgtBuiltins[] = { .name = "stateduration", .type = FUNCTION_TYPE_STATE_DURATION, .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | - FUNC_MGT_FORBID_STREAM_FUNC, + FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC, .translateFunc = translateStateDuration, .getEnvFunc = getStateFuncEnv, .initFunc = functionSetup, @@ -2863,7 +2878,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "csum", .type = FUNCTION_TYPE_CSUM, .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | - FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_CUMULATIVE_FUNC | FUNC_MGT_KEEP_ORDER_FUNC, + FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_CUMULATIVE_FUNC | FUNC_MGT_KEEP_ORDER_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC, .translateFunc = translateCsum, .getEnvFunc = getCsumFuncEnv, .initFunc = functionSetup, @@ -2876,7 +2891,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "mavg", .type = FUNCTION_TYPE_MAVG, .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | - FUNC_MGT_FORBID_STREAM_FUNC, + FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC, .translateFunc = translateMavg, .getEnvFunc = getMavgFuncEnv, .initFunc = mavgFunctionSetup, @@ -2887,7 +2902,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "sample", .type = FUNCTION_TYPE_SAMPLE, - .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_ROWS_FUNC | FUNC_MGT_KEEP_ORDER_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_FILL_FUNC, + .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_ROWS_FUNC | FUNC_MGT_KEEP_ORDER_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | + FUNC_MGT_FORBID_FILL_FUNC, .translateFunc = translateSample, .getEnvFunc = getSampleFuncEnv, .initFunc = sampleFunctionSetup, diff --git a/source/libs/function/src/functionMgt.c b/source/libs/function/src/functionMgt.c index 327bc7da71036f079c02e0160cc501b48df5fe14..345020cee23ca27ce7d0ec1a05103042b5b6fc23 100644 --- a/source/libs/function/src/functionMgt.c +++ b/source/libs/function/src/functionMgt.c @@ -219,6 +219,8 @@ bool fmIsKeepOrderFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, F bool fmIsCumulativeFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC_MGT_CUMULATIVE_FUNC); } +bool fmIsForbidSysTableFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC_MGT_FORBID_SYSTABLE_FUNC); } + bool fmIsInterpFunc(int32_t funcId) { if (funcId < 0 || funcId >= funcMgtBuiltinsNum) { return false; diff --git a/source/libs/index/src/indexFstFile.c b/source/libs/index/src/indexFstFile.c index 9e7ed5210425520f7b7c02a7f9b2edc6d98756ed..e18d0bbad3a99ef089a6c94111f3d148461d954a 100644 --- a/source/libs/index/src/indexFstFile.c +++ b/source/libs/index/src/indexFstFile.c @@ -29,7 +29,10 @@ typedef struct { char buf[0]; } SDataBlock; -static void deleteDataBlockFromLRU(const void* key, size_t keyLen, void* value) { taosMemoryFree(value); } +static void deleteDataBlockFromLRU(const void* key, size_t keyLen, void* value, void* ud) { + (void)ud; + taosMemoryFree(value); +} static FORCE_INLINE void idxGenLRUKey(char* buf, const char* path, int32_t blockId) { char* p = buf; @@ -136,7 +139,7 @@ static int idxFileCtxDoReadFrom(IFileCtx* ctx, uint8_t* buf, int len, int32_t of memcpy(buf + total, blk->buf + blkOffset, nread); LRUStatus s = taosLRUCacheInsert(ctx->lru, key, strlen(key), blk, 
cacheMemSize, deleteDataBlockFromLRU, NULL, - TAOS_LRU_PRIORITY_LOW); + TAOS_LRU_PRIORITY_LOW, NULL); if (s != TAOS_LRU_STATUS_OK) { return -1; } diff --git a/source/libs/nodes/src/nodesTraverseFuncs.c b/source/libs/nodes/src/nodesTraverseFuncs.c index ce575ede8a5dd2020c6af39394a72d6fa39cc348..b3623a4b0a0a2707d4e62cb3a98bd2a00f85fec6 100644 --- a/source/libs/nodes/src/nodesTraverseFuncs.c +++ b/source/libs/nodes/src/nodesTraverseFuncs.c @@ -214,6 +214,18 @@ void nodesWalkExprsPostOrder(SNodeList* pList, FNodeWalker walker, void* pContex (void)walkExprs(pList, TRAVERSAL_POSTORDER, walker, pContext); } +static void checkParamIsFunc(SFunctionNode *pFunc) { + int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); + if (numOfParams > 1) { + for (int32_t i = 0; i < numOfParams; ++i) { + SNode* pPara = nodesListGetNode(pFunc->pParameterList, i); + if (nodeType(pPara) == QUERY_NODE_FUNCTION) { + ((SFunctionNode *)pPara)->node.asParam = true; + } + } + } +} + static EDealRes rewriteExprs(SNodeList* pNodeList, ETraversalOrder order, FNodeRewriter rewriter, void* pContext); static EDealRes rewriteExpr(SNode** pRawNode, ETraversalOrder order, FNodeRewriter rewriter, void* pContext) { @@ -248,9 +260,12 @@ static EDealRes rewriteExpr(SNode** pRawNode, ETraversalOrder order, FNodeRewrit case QUERY_NODE_LOGIC_CONDITION: res = rewriteExprs(((SLogicConditionNode*)pNode)->pParameterList, order, rewriter, pContext); break; - case QUERY_NODE_FUNCTION: - res = rewriteExprs(((SFunctionNode*)pNode)->pParameterList, order, rewriter, pContext); + case QUERY_NODE_FUNCTION: { + SFunctionNode* pFunc = (SFunctionNode*)pNode; + checkParamIsFunc(pFunc); + res = rewriteExprs(pFunc->pParameterList, order, rewriter, pContext); break; + } case QUERY_NODE_REAL_TABLE: case QUERY_NODE_TEMP_TABLE: break; // todo diff --git a/source/libs/parser/src/parAuthenticator.c b/source/libs/parser/src/parAuthenticator.c index 251d3bd0cba6269ac594d1538894ee6a3ee3454b..9b2ac662c84443c8efa39b308af51d65a941706a 100644 --- a/source/libs/parser/src/parAuthenticator.c +++ b/source/libs/parser/src/parAuthenticator.c @@ -28,6 +28,10 @@ typedef struct SSelectAuthCxt { SSelectStmt* pSelect; } SSelectAuthCxt; +typedef struct SAuthRewriteCxt { + STableNode* pTarget; +} SAuthRewriteCxt; + static int32_t authQuery(SAuthCxt* pCxt, SNode* pStmt); static void setUserAuthInfo(SParseContext* pCxt, const char* pDbName, const char* pTabName, AUTH_TYPE type, @@ -90,12 +94,26 @@ static int32_t mergeStableTagCond(SNode** pWhere, SNode* pTagCond) { return code; } -static int32_t appendStableTagCond(SNode** pWhere, SNode* pTagCond) { +EDealRes rewriteAuthTable(SNode* pNode, void* pContext) { + if (QUERY_NODE_COLUMN == nodeType(pNode)) { + SColumnNode* pCol = (SColumnNode*)pNode; + SAuthRewriteCxt* pCxt = (SAuthRewriteCxt*)pContext; + strcpy(pCol->tableName, pCxt->pTarget->tableName); + strcpy(pCol->tableAlias, pCxt->pTarget->tableAlias); + } + + return DEAL_RES_CONTINUE; +} + +static int32_t rewriteAppendStableTagCond(SNode** pWhere, SNode* pTagCond, STableNode* pTable) { SNode* pTagCondCopy = nodesCloneNode(pTagCond); if (NULL == pTagCondCopy) { return TSDB_CODE_OUT_OF_MEMORY; } + SAuthRewriteCxt cxt = {.pTarget = pTable}; + nodesWalkExpr(pTagCondCopy, rewriteAuthTable, &cxt); + if (NULL == *pWhere) { *pWhere = pTagCondCopy; return TSDB_CODE_SUCCESS; @@ -117,7 +135,7 @@ static EDealRes authSelectImpl(SNode* pNode, void* pContext) { STableNode* pTable = (STableNode*)pNode; pAuthCxt->errCode = checkAuth(pAuthCxt, pTable->dbName, pTable->tableName, AUTH_TYPE_READ, 
&pTagCond); if (TSDB_CODE_SUCCESS == pAuthCxt->errCode && NULL != pTagCond) { - pAuthCxt->errCode = appendStableTagCond(&pCxt->pSelect->pWhere, pTagCond); + pAuthCxt->errCode = rewriteAppendStableTagCond(&pCxt->pSelect->pWhere, pTagCond, pTable); } return TSDB_CODE_SUCCESS == pAuthCxt->errCode ? DEAL_RES_CONTINUE : DEAL_RES_ERROR; } else if (QUERY_NODE_TEMP_TABLE == nodeType(pNode)) { @@ -152,7 +170,7 @@ static int32_t authDelete(SAuthCxt* pCxt, SDeleteStmt* pDelete) { STableNode* pTable = (STableNode*)pDelete->pFromTable; int32_t code = checkAuth(pCxt, pTable->dbName, pTable->tableName, AUTH_TYPE_WRITE, &pTagCond); if (TSDB_CODE_SUCCESS == code && NULL != pTagCond) { - code = appendStableTagCond(&pDelete->pWhere, pTagCond); + code = rewriteAppendStableTagCond(&pDelete->pWhere, pTagCond, pTable); } return code; } diff --git a/source/libs/parser/src/parCalcConst.c b/source/libs/parser/src/parCalcConst.c index 49d27f60836c5a9289faa86faf98847e72fc084c..c7219c47882212bf38a0ff8da556b02c7b586ab3 100644 --- a/source/libs/parser/src/parCalcConst.c +++ b/source/libs/parser/src/parCalcConst.c @@ -372,18 +372,33 @@ static bool notRefByOrderBy(SColumnNode* pCol, SNodeList* pOrderByList) { return !cxt.hasThisCol; } +static bool isDistinctSubQuery(SNode* pNode) { + if (NULL == pNode) { + return false; + } + switch (nodeType(pNode)) { + case QUERY_NODE_SELECT_STMT: + return ((SSelectStmt*)pNode)->isDistinct; + case QUERY_NODE_SET_OPERATOR: + return isDistinctSubQuery((((SSetOperator*)pNode)->pLeft)) || isDistinctSubQuery((((SSetOperator*)pNode)->pLeft)); + default: + break; + } + return false; +} + static bool isSetUselessCol(SSetOperator* pSetOp, int32_t index, SExprNode* pProj) { if (!isUselessCol(pProj)) { return false; } SNodeList* pLeftProjs = getChildProjection(pSetOp->pLeft); - if (!isUselessCol((SExprNode*)nodesListGetNode(pLeftProjs, index))) { + if (!isUselessCol((SExprNode*)nodesListGetNode(pLeftProjs, index)) || isDistinctSubQuery(pSetOp->pLeft)) { return false; } SNodeList* pRightProjs = getChildProjection(pSetOp->pRight); - if (!isUselessCol((SExprNode*)nodesListGetNode(pRightProjs, index))) { + if (!isUselessCol((SExprNode*)nodesListGetNode(pRightProjs, index)) || isDistinctSubQuery(pSetOp->pLeft)) { return false; } diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index b3a043fe12c0d6328c6b506ab7a327bea8cba54c..8fc4be5f956eb2430cbdaa21edc82f1fd3389606 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -916,6 +916,10 @@ static int32_t findAndSetColumn(STranslateContext* pCxt, SColumnNode** pColRef, if (QUERY_NODE_REAL_TABLE == nodeType(pTable)) { const STableMeta* pMeta = ((SRealTableNode*)pTable)->pMeta; if (isInternalPrimaryKey(pCol)) { + if (TSDB_SYSTEM_TABLE == pMeta->tableType) { + return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COLUMN, pCol->colName); + } + setColumnInfoBySchema((SRealTableNode*)pTable, pMeta->schema, -1, pCol); *pFound = true; return TSDB_CODE_SUCCESS; @@ -1692,6 +1696,20 @@ static int32_t translateForbidStreamFunc(STranslateContext* pCxt, SFunctionNode* return TSDB_CODE_SUCCESS; } +static int32_t translateForbidSysTableFunc(STranslateContext* pCxt, SFunctionNode* pFunc) { + if (!fmIsForbidSysTableFunc(pFunc->funcId)) { + return TSDB_CODE_SUCCESS; + } + + SSelectStmt* pSelect = (SSelectStmt*)pCxt->pCurrStmt; + SNode* pTable = pSelect->pFromTable; + if (NULL != pTable && QUERY_NODE_REAL_TABLE == nodeType(pTable) && + TSDB_SYSTEM_TABLE == 
((SRealTableNode*)pTable)->pMeta->tableType) { + return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED_FUNC, pFunc->functionName); + } + return TSDB_CODE_SUCCESS; +} + static int32_t translateRepeatScanFunc(STranslateContext* pCxt, SFunctionNode* pFunc) { if (!fmIsRepeatScanFunc(pFunc->funcId)) { return TSDB_CODE_SUCCESS; @@ -1760,7 +1778,7 @@ static int32_t translateMultiResFunc(STranslateContext* pCxt, SFunctionNode* pFu "%s(*) is only supported in SELECTed list", pFunc->functionName); } } - if (tsKeepColumnName && 1 == LIST_LENGTH(pFunc->pParameterList) && !pFunc->node.asAlias) { + if (tsKeepColumnName && 1 == LIST_LENGTH(pFunc->pParameterList) && !pFunc->node.asAlias && !pFunc->node.asParam) { strcpy(pFunc->node.userAlias, ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->userAlias); strcpy(pFunc->node.aliasName, pFunc->node.userAlias); } @@ -1923,6 +1941,9 @@ static int32_t translateNormalFunction(STranslateContext* pCxt, SFunctionNode* p if (TSDB_CODE_SUCCESS == code) { code = translateForbidStreamFunc(pCxt, pFunc); } + if (TSDB_CODE_SUCCESS == code) { + code = translateForbidSysTableFunc(pCxt, pFunc); + } if (TSDB_CODE_SUCCESS == code) { code = translateRepeatScanFunc(pCxt, pFunc); } @@ -2270,7 +2291,7 @@ static EDealRes doCheckExprForGroupBy(SNode** pNode, void* pContext) { } } if (isScanPseudoColumnFunc(*pNode) || QUERY_NODE_COLUMN == nodeType(*pNode)) { - if (pSelect->selectFuncNum > 1 || pSelect->hasOtherVectorFunc || !pSelect->hasSelectFunc) { + if (pSelect->selectFuncNum > 1 || pSelect->hasOtherVectorFunc || !pSelect->hasSelectFunc || (isDistinctOrderBy(pCxt) && pCxt->currClause == SQL_CLAUSE_ORDER_BY)) { return generateDealNodeErrMsg(pCxt, getGroupByErrorCode(pCxt), ((SExprNode*)(*pNode))->userAlias); } else { return rewriteColToSelectValFunc(pCxt, pNode); diff --git a/source/libs/parser/src/parUtil.c b/source/libs/parser/src/parUtil.c index 1c13f66f960b2fb341b90004655b09a2e6aa6250..f82d56ac563828c1e1fa5d523110f03ebcde4422 100644 --- a/source/libs/parser/src/parUtil.c +++ b/source/libs/parser/src/parUtil.c @@ -170,6 +170,8 @@ static char* getSyntaxErrFormat(int32_t errCode) { return "%s function is not supported in stream query"; case TSDB_CODE_PAR_GROUP_BY_NOT_ALLOWED_FUNC: return "%s function is not supported in group query"; + case TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED_FUNC: + return "%s function is not supported in system table query"; case TSDB_CODE_PAR_INVALID_INTERP_CLAUSE: return "Invalid usage of RANGE clause, EVERY clause or FILL clause"; case TSDB_CODE_PAR_NO_VALID_FUNC_IN_WIN: diff --git a/source/libs/planner/src/planUtil.c b/source/libs/planner/src/planUtil.c index 4b8009347d5e44ce236f99652b4814e36b8af459..29e87b34ce046d166678ab54ae5095d6e9a859fe 100644 --- a/source/libs/planner/src/planUtil.c +++ b/source/libs/planner/src/planUtil.c @@ -51,6 +51,7 @@ static EDealRes doCreateColumn(SNode* pNode, void* pContext) { } return (TSDB_CODE_SUCCESS == nodesListAppend(pCxt->pList, pCol) ? DEAL_RES_IGNORE_CHILD : DEAL_RES_ERROR); } + case QUERY_NODE_VALUE: case QUERY_NODE_OPERATOR: case QUERY_NODE_LOGIC_CONDITION: case QUERY_NODE_FUNCTION: @@ -247,8 +248,7 @@ static int32_t adjustPartitionDataRequirement(SPartitionLogicNode* pPart, EDataO return TSDB_CODE_PLAN_INTERNAL_ERROR; } pPart->node.resultDataOrder = requirement; - pPart->node.requireDataOrder = - (requirement >= DATA_ORDER_LEVEL_IN_BLOCK ? 
DATA_ORDER_LEVEL_GLOBAL : DATA_ORDER_LEVEL_NONE); + pPart->node.requireDataOrder = requirement; return TSDB_CODE_SUCCESS; } diff --git a/source/libs/stream/CMakeLists.txt b/source/libs/stream/CMakeLists.txt index 5c24db5051f9b9e8e7a400d9e58dfb9714848f70..b63a8b39009187714d44d95f353ba4baa7cf8d6d 100644 --- a/source/libs/stream/CMakeLists.txt +++ b/source/libs/stream/CMakeLists.txt @@ -8,34 +8,43 @@ target_include_directories( if(${BUILD_WITH_ROCKSDB}) - IF (TD_LINUX) - target_include_directories( - stream - PUBLIC "${TD_SOURCE_DIR}/deps/${TD_DEPS_DIR}/rocksdb_static" - ) - target_link_directories( - stream - PUBLIC "${TD_SOURCE_DIR}/deps/${TD_DEPS_DIR}/rocksdb_static" - ) - target_link_libraries( - stream - PUBLIC rocksdb tdb - PRIVATE os util transport qcom executor wal index - ) - ELSE() - target_link_libraries( - stream - PUBLIC rocksdb tdb - PRIVATE os util transport qcom executor wal index - ) - - ENDIF() - - target_include_directories( - stream - PUBLIC "${TD_SOURCE_DIR}/contrib/rocksdb/include" - ) - + if (${BUILD_CONTRIB}) + target_link_libraries( + stream + PUBLIC rocksdb tdb + PRIVATE os util transport qcom executor wal index + ) + target_include_directories( + stream + PUBLIC "${TD_SOURCE_DIR}/contrib/rocksdb/include" + ) + else() + if (TD_LINUX) + target_include_directories( + stream + PUBLIC "${TD_SOURCE_DIR}/deps/${TD_DEPS_DIR}/rocksdb_static" + ) + target_link_directories( + stream + PUBLIC "${TD_SOURCE_DIR}/deps/${TD_DEPS_DIR}/rocksdb_static" + ) + target_link_libraries( + stream + PUBLIC rocksdb tdb + PRIVATE os util transport qcom executor wal index + ) + else() + target_link_libraries( + stream + PUBLIC rocksdb tdb + PRIVATE os util transport qcom executor wal index + ) + target_include_directories( + stream + PUBLIC "${TD_SOURCE_DIR}/contrib/rocksdb/include" + ) + endif() + endif() add_definitions(-DUSE_ROCKSDB) endif(${BUILD_WITH_ROCKSDB}) diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index 922a1f534592dd11a1af7f34592a8251f178ec7b..7af3219f85f16bc0e87af16e66b2ef576ae2951c 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -349,6 +349,7 @@ int32_t streamSearchAndAddBlock(SStreamTask* pTask, SStreamDispatchReq* pReqs, S SBlockName* pBln = (SBlockName*)pVal; hashValue = pBln->hashValue; if (!pDataBlock->info.parTbName[0]) { + memset(pDataBlock->info.parTbName, 0, TSDB_TABLE_NAME_LEN); memcpy(pDataBlock->info.parTbName, pBln->parTbName, strlen(pBln->parTbName)); } } else { diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index ccf7c3e4a4c74f9b9090a9187599af007658648b..f8e21af2c3adc06c428eede38a2dac00cbfb74cc 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -37,7 +37,6 @@ #include "syncVoteMgr.h" #include "tglobal.h" #include "tref.h" -#include "syncUtil.h" static void syncNodeEqPingTimer(void* param, void* tmrId); static void syncNodeEqElectTimer(void* param, void* tmrId); @@ -141,10 +140,10 @@ int32_t syncReconfig(int64_t rid, SSyncCfg* pNewCfg) { SSyncNode* pSyncNode = syncNodeAcquire(rid); if (pSyncNode == NULL) return -1; - if(pSyncNode->raftCfg.lastConfigIndex >= pNewCfg->lastIndex){ + if (pSyncNode->raftCfg.lastConfigIndex >= pNewCfg->lastIndex) { syncNodeRelease(pSyncNode); sInfo("vgId:%d, no need Reconfig, current index:%" PRId64 ", new index:%" PRId64, pSyncNode->vgId, - pSyncNode->raftCfg.lastConfigIndex, pNewCfg->lastIndex); + pSyncNode->raftCfg.lastConfigIndex, pNewCfg->lastIndex); return 0; } @@ 
-323,8 +322,8 @@ int32_t syncBeginSnapshot(int64_t rid, int64_t lastApplyIndex) { } if (pSyncNode->totalReplicaNum > 1) { - if (pSyncNode->state != TAOS_SYNC_STATE_LEADER && pSyncNode->state != TAOS_SYNC_STATE_FOLLOWER - && pSyncNode->state != TAOS_SYNC_STATE_LEARNER) { + if (pSyncNode->state != TAOS_SYNC_STATE_LEADER && pSyncNode->state != TAOS_SYNC_STATE_FOLLOWER && + pSyncNode->state != TAOS_SYNC_STATE_LEARNER) { sNTrace(pSyncNode, "new-snapshot-index:%" PRId64 " candidate or unknown state, do not delete wal", lastApplyIndex); syncNodeRelease(pSyncNode); @@ -544,7 +543,7 @@ void syncGetRetryEpSet(int64_t rid, SEpSet* pEpSet) { if (pSyncNode == NULL) return; for (int32_t i = 0; i < pSyncNode->raftCfg.cfg.totalReplicaNum; ++i) { - if(pSyncNode->raftCfg.cfg.nodeInfo[i].nodeRole == TAOS_SYNC_ROLE_LEARNER) continue; + if (pSyncNode->raftCfg.cfg.nodeInfo[i].nodeRole == TAOS_SYNC_ROLE_LEARNER) continue; SEp* pEp = &pEpSet->eps[i]; tstrncpy(pEp->fqdn, pSyncNode->raftCfg.cfg.nodeInfo[i].nodeFqdn, TSDB_FQDN_LEN); pEp->port = (pSyncNode->raftCfg.cfg.nodeInfo)[i].nodePort; @@ -579,21 +578,19 @@ int32_t syncIsCatchUp(int64_t rid) { } int32_t isCatchUp = 0; - if(pSyncNode->pLogBuf->totalIndex < 0 || pSyncNode->pLogBuf->commitIndex < 0 || + if (pSyncNode->pLogBuf->totalIndex < 0 || pSyncNode->pLogBuf->commitIndex < 0 || pSyncNode->pLogBuf->totalIndex < pSyncNode->pLogBuf->commitIndex || - pSyncNode->pLogBuf->totalIndex - pSyncNode->pLogBuf->commitIndex > SYNC_LEARNER_CATCHUP){ - sInfo("vgId:%d, Not catch up, wait one second, totalIndex:%" PRId64 " commitIndex:%" PRId64 " matchIndex:%" PRId64, - pSyncNode->vgId, pSyncNode->pLogBuf->totalIndex, pSyncNode->pLogBuf->commitIndex, - pSyncNode->pLogBuf->matchIndex); + pSyncNode->pLogBuf->totalIndex - pSyncNode->pLogBuf->commitIndex > SYNC_LEARNER_CATCHUP) { + sInfo("vgId:%d, Not catch up, wait one second, totalIndex:%" PRId64 " commitIndex:%" PRId64 " matchIndex:%" PRId64, + pSyncNode->vgId, pSyncNode->pLogBuf->totalIndex, pSyncNode->pLogBuf->commitIndex, + pSyncNode->pLogBuf->matchIndex); isCatchUp = 0; - } - else{ - sInfo("vgId:%d, Catch up, totalIndex:%" PRId64 " commitIndex:%" PRId64 " matchIndex:%" PRId64, - pSyncNode->vgId, pSyncNode->pLogBuf->totalIndex, pSyncNode->pLogBuf->commitIndex, - pSyncNode->pLogBuf->matchIndex); + } else { + sInfo("vgId:%d, Catch up, totalIndex:%" PRId64 " commitIndex:%" PRId64 " matchIndex:%" PRId64, pSyncNode->vgId, + pSyncNode->pLogBuf->totalIndex, pSyncNode->pLogBuf->commitIndex, pSyncNode->pLogBuf->matchIndex); isCatchUp = 1; } - + syncNodeRelease(pSyncNode); return isCatchUp; } @@ -606,7 +603,7 @@ ESyncRole syncGetRole(int64_t rid) { } ESyncRole role = pSyncNode->raftCfg.cfg.nodeInfo[pSyncNode->raftCfg.cfg.myIndex].nodeRole; - + syncNodeRelease(pSyncNode); return role; } @@ -801,8 +798,8 @@ SSyncNode* syncNodeOpen(SSyncInfo* pSyncInfo) { pSyncNode->vgId = pSyncInfo->vgId; SSyncCfg* pCfg = &pSyncNode->raftCfg.cfg; bool updated = false; - sInfo("vgId:%d, start to open sync node, totalReplicaNum:%d replicaNum:%d selfIndex:%d", - pSyncNode->vgId, pCfg->totalReplicaNum, pCfg->replicaNum, pCfg->myIndex); + sInfo("vgId:%d, start to open sync node, totalReplicaNum:%d replicaNum:%d selfIndex:%d", pSyncNode->vgId, + pCfg->totalReplicaNum, pCfg->replicaNum, pCfg->myIndex); for (int32_t i = 0; i < pCfg->totalReplicaNum; ++i) { SNodeInfo* pNode = &pCfg->nodeInfo[i]; if (tmsgUpdateDnodeInfo(&pNode->nodeId, &pNode->clusterId, pNode->nodeFqdn, &pNode->nodePort)) { @@ -1109,10 +1106,9 @@ int32_t syncNodeRestore(SSyncNode* pSyncNode) { 
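
Editor's note: the indexFstFile.c hunk earlier and the syncMain.c / tlrucache.c hunks that follow thread a user-data pointer through the LRU cache API: taosLRUCacheInsert gains a trailing `void *ud` argument, SLRUEntry stores it, and every deleter callback now receives it as a fourth parameter. The snippet below is a minimal standalone sketch of that callback shape under assumed, simplified types (Entry, entryRelease, freeValue are hypothetical stand-ins); it is not the tlrucache implementation.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Matches the updated callback shape: key, key length, value, and now user data. */
typedef void (*lru_deleter_t)(const void *key, size_t keyLen, void *value, void *ud);

/* A cache entry keeps the deleter and its ud next to the value, as SLRUEntry now does. */
typedef struct {
  const char   *key;
  size_t        keyLen;
  void         *value;
  lru_deleter_t deleter;
  void         *ud;      /* supplied by the caller at insert time */
} Entry;

static void freeValue(const void *key, size_t keyLen, void *value, void *ud) {
  (void)key; (void)keyLen; (void)ud;  /* ud carries optional per-cache context */
  free(value);
}

/* Mirrors taosLRUEntryFree: the stored ud is forwarded to the deleter. */
static void entryRelease(Entry *e) {
  if (e->deleter != NULL) {
    e->deleter(e->key, e->keyLen, e->value, e->ud);
  }
}

int main(void) {
  Entry e = {"block:1", strlen("block:1"), malloc(64), freeValue, NULL};
  entryRelease(&e);
  printf("entry released\n");
  return 0;
}
```
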
int32_t syncNodeStart(SSyncNode* pSyncNode) { // start raft - if(pSyncNode->raftCfg.cfg.nodeInfo[pSyncNode->raftCfg.cfg.myIndex].nodeRole == TAOS_SYNC_ROLE_LEARNER){ + if (pSyncNode->raftCfg.cfg.nodeInfo[pSyncNode->raftCfg.cfg.myIndex].nodeRole == TAOS_SYNC_ROLE_LEARNER) { syncNodeBecomeLearner(pSyncNode, "first start"); - } - else{ + } else { if (pSyncNode->replicaNum == 1) { raftStoreNextTerm(pSyncNode); syncNodeBecomeLeader(pSyncNode, "one replica start"); @@ -1121,7 +1117,7 @@ int32_t syncNodeStart(SSyncNode* pSyncNode) { syncNodeAppendNoop(pSyncNode); } else { syncNodeBecomeFollower(pSyncNode, "first start"); - } + } } int32_t ret = 0; @@ -1437,7 +1433,7 @@ static bool syncIsConfigChanged(const SSyncCfg* pOldCfg, const SSyncCfg* pNewCfg const SNodeInfo* pNewInfo = &pNewCfg->nodeInfo[i]; if (strcmp(pOldInfo->nodeFqdn, pNewInfo->nodeFqdn) != 0) return true; if (pOldInfo->nodePort != pNewInfo->nodePort) return true; - if(pOldInfo->nodeRole != pNewInfo->nodeRole) return true; + if (pOldInfo->nodeRole != pNewInfo->nodeRole) return true; } return false; @@ -1474,10 +1470,9 @@ void syncNodeDoConfigChange(SSyncNode* pSyncNode, SSyncCfg* pNewConfig, SyncInde } // log begin config change - sNInfo(pSyncNode, "begin do config change, from %d to %d, from %" PRId64 " to %" PRId64 ", replicas:%d", - pSyncNode->vgId, - oldConfig.totalReplicaNum, pNewConfig->totalReplicaNum, - oldConfig.lastIndex, pNewConfig->lastIndex); + sNInfo(pSyncNode, "begin do config change, from %d to %d, from %" PRId64 " to %" PRId64 ", replicas:%d", + pSyncNode->vgId, oldConfig.totalReplicaNum, pNewConfig->totalReplicaNum, oldConfig.lastIndex, + pNewConfig->lastIndex); if (IamInNew) { pSyncNode->raftCfg.isStandBy = 0; // change isStandBy to normal @@ -1594,6 +1589,7 @@ void syncNodeDoConfigChange(SSyncNode* pSyncNode, SSyncCfg* pNewConfig, SyncInde // persist cfg syncWriteCfgFile(pSyncNode); +#if 0 // change isStandBy to normal (election timeout) if (pSyncNode->state == TAOS_SYNC_STATE_LEADER) { syncNodeBecomeLeader(pSyncNode, ""); @@ -1605,6 +1601,7 @@ void syncNodeDoConfigChange(SSyncNode* pSyncNode, SSyncCfg* pNewConfig, SyncInde } else { syncNodeBecomeFollower(pSyncNode, ""); } +#endif } else { // persist cfg syncWriteCfgFile(pSyncNode); @@ -2233,7 +2230,10 @@ static void syncNodeEqPeerHeartbeatTimer(void* param, void* tmrId) { syncNodeRelease(pSyncNode); } -static void deleteCacheEntry(const void* key, size_t keyLen, void* value) { taosMemoryFree(value); } +static void deleteCacheEntry(const void* key, size_t keyLen, void* value, void* ud) { + (void)ud; + taosMemoryFree(value); +} int32_t syncCacheEntry(SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry, LRUHandle** h) { SSyncLogStoreData* pData = pLogStore->data; @@ -2242,7 +2242,7 @@ int32_t syncCacheEntry(SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry, LRUHand int32_t code = 0; int32_t entryLen = sizeof(*pEntry) + pEntry->dataLen; LRUStatus status = taosLRUCacheInsert(pLogStore->pCache, &pEntry->index, sizeof(pEntry->index), pEntry, entryLen, - deleteCacheEntry, h, TAOS_LRU_PRIORITY_LOW); + deleteCacheEntry, h, TAOS_LRU_PRIORITY_LOW, NULL); if (status != TAOS_LRU_STATUS_OK) { code = -1; } @@ -2408,11 +2408,10 @@ int32_t syncNodeOnHeartbeat(SSyncNode* ths, const SRpcMsg* pRpcMsg) { pMsgReply->startTime = ths->startTime; pMsgReply->timeStamp = tsMs; - sTrace( - "vgId:%d, heartbeat msg from dnode:%d, cluster:%d, Msgterm:%" PRId64 " currentTerm:%" PRId64, - ths->vgId, DID(&(pMsg->srcId)), CID(&(pMsg->srcId)), pMsg->term, currentTerm); + sTrace("vgId:%d, heartbeat msg 
from dnode:%d, cluster:%d, Msgterm:%" PRId64 " currentTerm:%" PRId64, ths->vgId, + DID(&(pMsg->srcId)), CID(&(pMsg->srcId)), pMsg->term, currentTerm); - if(pMsg->term > currentTerm && ths->state == TAOS_SYNC_STATE_LEARNER){ + if (pMsg->term > currentTerm && ths->state == TAOS_SYNC_STATE_LEARNER) { raftStoreSetTerm(ths, pMsg->term); currentTerm = pMsg->term; } diff --git a/source/libs/sync/src/syncPipeline.c b/source/libs/sync/src/syncPipeline.c index 794d80bbfaec7c38eca4b38f78c62022fd6cc7cc..92f34db16d2d84b0edbf284ecb5c0b1c0a5abc60 100644 --- a/source/libs/sync/src/syncPipeline.c +++ b/source/libs/sync/src/syncPipeline.c @@ -482,6 +482,7 @@ int64_t syncLogBufferProceed(SSyncLogBuffer* pBuf, SSyncNode* pNode, SyncTerm* p if (syncLogStorePersist(pLogStore, pNode, pEntry) < 0) { sError("vgId:%d, failed to persist sync log entry from buffer since %s. index:%" PRId64, pNode->vgId, terrstr(), pEntry->index); + taosMsleep(1); goto _out; } ASSERT(pEntry->index == pBuf->matchIndex); diff --git a/source/libs/sync/src/syncUtil.c b/source/libs/sync/src/syncUtil.c index cf796c386297271d1138189ea812fe4b1fbe984c..ae1c775a18f6c47291f3065f83db0c2ff8cee94e 100644 --- a/source/libs/sync/src/syncUtil.c +++ b/source/libs/sync/src/syncUtil.c @@ -364,10 +364,10 @@ void syncLogRecvHeartbeat(SSyncNode* pSyncNode, const SyncHeartbeat* pMsg, int64 if (timeDiff > SYNC_HEARTBEAT_SLOW_MS) { pSyncNode->hbSlowNum++; - sNInfo(pSyncNode, - "recv sync-heartbeat from dnode:%d slow {term:%" PRId64 ", commit-index:%" PRId64 ", min-match:%" PRId64 - ", ts:%" PRId64 "}, %s, net elapsed:%" PRId64, - DID(&pMsg->srcId), pMsg->term, pMsg->commitIndex, pMsg->minMatchIndex, pMsg->timeStamp, s, timeDiff); + sNTrace(pSyncNode, + "recv sync-heartbeat from dnode:%d slow {term:%" PRId64 ", commit-index:%" PRId64 ", min-match:%" PRId64 + ", ts:%" PRId64 "}, %s, net elapsed:%" PRId64, + DID(&pMsg->srcId), pMsg->term, pMsg->commitIndex, pMsg->minMatchIndex, pMsg->timeStamp, s, timeDiff); } sNTrace(pSyncNode, diff --git a/source/libs/wal/src/walRef.c b/source/libs/wal/src/walRef.c index fd1d51a4f14a549864e8c0b9a40993335fa9a2bc..b7169dec5387e247284cb95afc11eca452e97741 100644 --- a/source/libs/wal/src/walRef.c +++ b/source/libs/wal/src/walRef.c @@ -80,12 +80,3 @@ void walRefLastVer(SWal *pWal, SWalRef *pRef) { taosThreadMutexUnlock(&pWal->mutex); wDebug("vgId:%d, wal ref version %" PRId64 " for last", pWal->cfg.vgId, ver); } - -//void walRefCommitVer(SWal *pWal, SWalRef *pRef) { -// taosThreadMutexLock(&pWal->mutex); -// int64_t ver = walGetCommittedVer(pWal); -// pRef->refVer = ver; -// -// taosThreadMutexUnlock(&pWal->mutex); -// wDebug("vgId:%d, wal ref version %" PRId64 " for committed", pWal->cfg.vgId, ver); -//} diff --git a/source/util/src/terror.c b/source/util/src/terror.c index d7571b928388944a01118ae027a4ae8418e4edce..0a53ece746f31fb295f818a411ee4f778256f423 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -554,7 +554,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_COL_JSON, "Only tag can be jso TAOS_DEFINE_ERROR(TSDB_CODE_PAR_VALUE_TOO_LONG, "Value too long for column/tag") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_DELETE_WHERE, "The DELETE statement must have a definite time window range") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_REDISTRIBUTE_VG, "The REDISTRIBUTE VGROUP statement only support 1 to 3 dnodes") -TAOS_DEFINE_ERROR(TSDB_CODE_PAR_FILL_NOT_ALLOWED_FUNC, "Fill now allowed") +TAOS_DEFINE_ERROR(TSDB_CODE_PAR_FILL_NOT_ALLOWED_FUNC, "Fill not allowed") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_WINDOW_PC, 
"Invalid windows pc") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_WINDOW_NOT_ALLOWED_FUNC, "Window not allowed") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_STREAM_NOT_ALLOWED_FUNC, "Stream not allowed") @@ -566,6 +566,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_SMA_INDEX, "Invalid sma index") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_SELECTED_EXPR, "Invalid SELECTed expression") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_GET_META_ERROR, "Fail to get table info") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_NOT_UNIQUE_TABLE_ALIAS, "Not unique table/alias") +TAOS_DEFINE_ERROR(TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED_FUNC, "System table not allowed") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INTERNAL_ERROR, "Parser internal error") //planner diff --git a/source/util/src/tlosertree.c b/source/util/src/tlosertree.c index c476baa7908172458cb5ffba718cd55fca62772d..f85ab0ecad01e8deb6d99315469e2133b5fc8282 100644 --- a/source/util/src/tlosertree.c +++ b/source/util/src/tlosertree.c @@ -71,12 +71,12 @@ int32_t tMergeTreeCreate(SMultiwayMergeTreeInfo** pTree, uint32_t numOfSources, return 0; } -void tMergeTreeDestroy(SMultiwayMergeTreeInfo* pTree) { - if (pTree == NULL) { +void tMergeTreeDestroy(SMultiwayMergeTreeInfo** pTree) { + if (pTree == NULL || *pTree == NULL) { return; } - taosMemoryFreeClear(pTree); + taosMemoryFreeClear(*pTree); } void tMergeTreeAdjust(SMultiwayMergeTreeInfo* pTree, int32_t idx) { diff --git a/source/util/src/tlrucache.c b/source/util/src/tlrucache.c index e182800d9c6219c0f0326a39319a3ddf67ae54b4..3de159797fc6027c3553e5b46c8f74b547de3406 100644 --- a/source/util/src/tlrucache.c +++ b/source/util/src/tlrucache.c @@ -39,6 +39,7 @@ enum { struct SLRUEntry { void *value; _taos_lru_deleter_t deleter; + void *ud; SLRUEntry *nextHash; SLRUEntry *next; SLRUEntry *prev; @@ -94,7 +95,7 @@ static void taosLRUEntryFree(SLRUEntry *entry) { ASSERT(entry->refs == 0); if (entry->deleter) { - (*entry->deleter)(entry->keyData, entry->keyLength, entry->value); + (*entry->deleter)(entry->keyData, entry->keyLength, entry->value, entry->ud); } taosMemoryFree(entry); @@ -146,6 +147,25 @@ static void taosLRUEntryTableCleanup(SLRUEntryTable *table) { taosMemoryFree(table->list); } +static int taosLRUEntryTableApplyF(SLRUEntryTable *table, _taos_lru_functor_t functor, void *ud) { + int ret = 0; + uint32_t end = 1 << table->lengthBits; + for (uint32_t i = 0; i < end; ++i) { + SLRUEntry *h = table->list[i]; + while (h) { + SLRUEntry *n = h->nextHash; + ASSERT(TAOS_LRU_ENTRY_IN_CACHE(h)); + ret = functor(h->keyData, h->keyLength, h->value, ud); + if (ret) { + return ret; + } + h = n; + } + } + + return ret; +} + static SLRUEntry **taosLRUEntryTableFindPtr(SLRUEntryTable *table, const void *key, size_t keyLen, uint32_t hash) { SLRUEntry **entry = &table->list[hash >> (32 - table->lengthBits)]; while (*entry && ((*entry)->hash != hash || memcmp(key, (*entry)->keyData, keyLen) != 0)) { @@ -424,7 +444,7 @@ static LRUStatus taosLRUCacheShardInsertEntry(SLRUCacheShard *shard, SLRUEntry * static LRUStatus taosLRUCacheShardInsert(SLRUCacheShard *shard, const void *key, size_t keyLen, uint32_t hash, void *value, size_t charge, _taos_lru_deleter_t deleter, LRUHandle **handle, - LRUPriority priority) { + LRUPriority priority, void *ud) { SLRUEntry *e = taosMemoryCalloc(1, sizeof(SLRUEntry) - 1 + keyLen); if (!e) { return TAOS_LRU_STATUS_FAIL; @@ -433,6 +453,7 @@ static LRUStatus taosLRUCacheShardInsert(SLRUCacheShard *shard, const void *key, e->value = value; e->flags = 0; e->deleter = deleter; + e->ud = ud; e->keyLength = keyLen; e->hash = hash; e->refs = 0; @@ -490,6 
+511,18 @@ static void taosLRUCacheShardErase(SLRUCacheShard *shard, const void *key, size_ } } +static int taosLRUCacheShardApply(SLRUCacheShard *shard, _taos_lru_functor_t functor, void *ud) { + int ret; + + taosThreadMutexLock(&shard->mutex); + + ret = taosLRUEntryTableApplyF(&shard->table, functor, ud); + + taosThreadMutexUnlock(&shard->mutex); + + return ret; +} + static void taosLRUCacheShardEraseUnrefEntries(SLRUCacheShard *shard) { SArray *lastReferenceList = taosArrayInit(16, POINTER_BYTES); @@ -700,12 +733,12 @@ void taosLRUCacheCleanup(SLRUCache *cache) { } LRUStatus taosLRUCacheInsert(SLRUCache *cache, const void *key, size_t keyLen, void *value, size_t charge, - _taos_lru_deleter_t deleter, LRUHandle **handle, LRUPriority priority) { + _taos_lru_deleter_t deleter, LRUHandle **handle, LRUPriority priority, void *ud) { uint32_t hash = TAOS_LRU_CACHE_SHARD_HASH32(key, keyLen); uint32_t shardIndex = hash & cache->shardedCache.shardMask; return taosLRUCacheShardInsert(&cache->shards[shardIndex], key, keyLen, hash, value, charge, deleter, handle, - priority); + priority, ud); } LRUHandle *taosLRUCacheLookup(SLRUCache *cache, const void *key, size_t keyLen) { @@ -722,6 +755,15 @@ void taosLRUCacheErase(SLRUCache *cache, const void *key, size_t keyLen) { return taosLRUCacheShardErase(&cache->shards[shardIndex], key, keyLen, hash); } +void taosLRUCacheApply(SLRUCache *cache, _taos_lru_functor_t functor, void *ud) { + int numShards = cache->numShards; + for (int i = 0; i < numShards; ++i) { + if (taosLRUCacheShardApply(&cache->shards[i], functor, ud)) { + break; + } + } +} + void taosLRUCacheEraseUnrefEntries(SLRUCache *cache) { int numShards = cache->numShards; for (int i = 0; i < numShards; ++i) { diff --git a/tests/develop-test/win-test-file b/tests/develop-test/win-test-file index e4f3bcf56ee0ea3154232d384e005855b60e3c82..b640ef6bfe60d94dd4d3d2430edd71e89af120d8 100644 --- a/tests/develop-test/win-test-file +++ b/tests/develop-test/win-test-file @@ -9,5 +9,6 @@ python3 ./test.py -f 5-taos-tools/taosbenchmark/invalid_commandline.py python3 ./test.py -f 5-taos-tools/taosbenchmark/json_tag.py python3 ./test.py -f 5-taos-tools/taosbenchmark/query_json.py python3 ./test.py -f 5-taos-tools/taosbenchmark/sample_csv_json.py +python3 ./test.py -f 5-taos-tools/taosbenchmark/sml_json_alltypes.py python3 ./test.py -f 5-taos-tools/taosbenchmark/taosdemoTestQueryWithJson.py -R python3 ./test.py -f 5-taos-tools/taosbenchmark/telnet_tcp.py -R diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index e24045207a50117dd1ff9aa3fc6b6917d613eb12..1dff2a90d022ac9bd2e3328c09354d7a6cfe3a2f 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -34,6 +34,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/subscribeDb0.py -N 3 -n 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/ins_topics_test.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqParamsTest.py +,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqClientConsLog.py ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/delete_stable.py @@ -400,6 +401,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/concat2.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/json_tag.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQueryInterval.py +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/systable_func.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stablity.py ,,y,system-test,./pytest.sh python3 ./test.py -f 
2-query/stablity_1.py @@ -442,7 +444,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 7 -M 3 -C 6 ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 7 -M 3 -C 6 -n 3 #,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeDrop.py -N 5 -,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeRecreateMnode.py -N 5 -M 3 +,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeRecreateMnode.py -N 6 -M 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeStopFollowerLeader.py -N 5 -M 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeStop2Follower.py -N 5 -M 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_createDb_replica1.py -N 4 -M 1 @@ -518,7 +520,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/csum.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/mavg.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sample.py -Q 2 -#,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_diff.py -Q 2 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_diff.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/unique.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stateduration.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_stateduration.py -Q 2 @@ -613,7 +615,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/csum.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/mavg.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sample.py -Q 3 -#,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_diff.py -Q 3 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_diff.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/unique.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stateduration.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_stateduration.py -Q 3 @@ -709,7 +711,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/mavg.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sample.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/cast.py -Q 4 -#,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_diff.py -Q 4 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_diff.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/unique.py -Q 4 #,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stateduration.py -Q 4 #,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_stateduration.py -Q 4 diff --git a/tests/parallel_test/split_case.sh b/tests/parallel_test/split_case.sh index af601ed9a6a1c4d371e56d94aa45bc56b8715793..4e2c535fafab9a7d593b11f624149ec1dd20c6bf 100755 --- a/tests/parallel_test/split_case.sh +++ b/tests/parallel_test/split_case.sh @@ -5,6 +5,8 @@ parm_path=$(pwd ${parm_path}) echo "execute path:${parm_path}" cd ${parm_path} cp cases.task ${case_file} +sed -i '/udf/d' ${case_file} +sed -i '/Udf/d' ${case_file} sed -i '/^$/d' ${case_file} sed -i '$a\%%FINISHED%%' ${case_file} diff --git a/tests/script/win-test-file b/tests/script/win-test-file index adef71cb45a0de2b570f649eedc5f2dcebad3ca4..d394ce68762777d3d6b81456f929174b6cb5603e 100644 --- a/tests/script/win-test-file +++ b/tests/script/win-test-file @@ -1,3 +1,28 @@ +./test.sh -f tsim/tmq/basic2Of2ConsOverlap.sim 
+./test.sh -f tsim/parser/where.sim +./test.sh -f tsim/parser/join_manyblocks.sim +./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v1_leader.sim +./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v1_follower.sim +./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v2.sim +./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v3.sim +./test.sh -f tsim/parser/limit1.sim +./test.sh -f tsim/parser/union.sim +./test.sh -f tsim/parser/commit.sim +./test.sh -f tsim/parser/nestquery.sim +./test.sh -f tsim/valgrind/checkError7.sim +./test.sh -f tsim/parser/groupby.sim +./test.sh -f tsim/parser/sliding.sim +./test.sh -f tsim/dnode/balance2.sim +./test.sh -f tsim/vnode/replica3_repeat.sim +./test.sh -f tsim/parser/col_arithmetic_operation.sim +./test.sh -f tsim/trans/create_db.sim +./test.sh -f tsim/dnode/balance3.sim +./test.sh -f tsim/vnode/replica3_many.sim +./test.sh -f tsim/stable/metrics_idx.sim +./test.sh -f tsim/db/alter_replica_13.sim +./test.sh -f tsim/sync/3Replica1VgElect.sim +./test.sh -f tsim/sync/3Replica5VgElect.sim +./test.sh -f tsim/valgrind/checkError6.sim ./test.sh -f tsim/user/basic.sim ./test.sh -f tsim/user/password.sim ./test.sh -f tsim/user/privilege_db.sim @@ -6,7 +31,6 @@ ./test.sh -f tsim/user/privilege_table.sim ./test.sh -f tsim/user/privilege_create_db.sim ./test.sh -f tsim/db/alter_option.sim -rem ./test.sh -f tsim/db/alter_replica_13.sim ./test.sh -f tsim/db/alter_replica_31.sim ./test.sh -f tsim/db/basic1.sim ./test.sh -f tsim/db/basic2.sim @@ -30,11 +54,10 @@ rem ./test.sh -f tsim/db/alter_replica_13.sim ./test.sh -f tsim/db/show_create_table.sim ./test.sh -f tsim/db/tables.sim ./test.sh -f tsim/db/taosdlog.sim +./test.sh -f tsim/db/table_prefix_suffix.sim ./test.sh -f tsim/dnode/balance_replica1.sim ./test.sh -f tsim/dnode/balance_replica3.sim ./test.sh -f tsim/dnode/balance1.sim -./test.sh -f tsim/dnode/balance2.sim -./test.sh -f tsim/dnode/balance3.sim ./test.sh -f tsim/dnode/balancex.sim ./test.sh -f tsim/dnode/create_dnode.sim ./test.sh -f tsim/dnode/drop_dnode_has_mnode.sim @@ -46,10 +69,6 @@ rem ./test.sh -f tsim/db/alter_replica_13.sim ./test.sh -f tsim/dnode/drop_dnode_force.sim ./test.sh -f tsim/dnode/offline_reason.sim ./test.sh -f tsim/dnode/redistribute_vgroup_replica1.sim -./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v1_leader.sim -./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v1_follower.sim -./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v2.sim -./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v3.sim ./test.sh -f tsim/dnode/vnode_clean.sim ./test.sh -f tsim/dnode/use_dropped_dnode.sim ./test.sh -f tsim/dnode/split_vgroup_replica1.sim @@ -75,6 +94,7 @@ rem ./test.sh -f tsim/db/alter_replica_13.sim ./test.sh -f tsim/insert/query_multi_file.sim ./test.sh -f tsim/insert/tcp.sim ./test.sh -f tsim/insert/update0.sim +./test.sh -f tsim/insert/delete0.sim ./test.sh -f tsim/insert/update1_sort_merge.sim ./test.sh -f tsim/insert/update2.sim ./test.sh -f tsim/parser/alter__for_community_version.sim @@ -86,7 +106,6 @@ rem ./test.sh -f tsim/db/alter_replica_13.sim ./test.sh -f tsim/parser/auto_create_tb.sim ./test.sh -f tsim/parser/between_and.sim ./test.sh -f tsim/parser/binary_escapeCharacter.sim -./test.sh -f tsim/parser/col_arithmetic_operation.sim ./test.sh -f tsim/parser/columnValue_bigint.sim ./test.sh -f tsim/parser/columnValue_bool.sim ./test.sh -f tsim/parser/columnValue_double.sim @@ -95,8 +114,8 @@ rem ./test.sh -f tsim/db/alter_replica_13.sim ./test.sh -f tsim/parser/columnValue_smallint.sim ./test.sh -f 
tsim/parser/columnValue_tinyint.sim ./test.sh -f tsim/parser/columnValue_unsign.sim -./test.sh -f tsim/parser/commit.sim ./test.sh -f tsim/parser/condition.sim +./test.sh -f tsim/parser/condition_scl.sim ./test.sh -f tsim/parser/constCol.sim ./test.sh -f tsim/parser/create_db.sim ./test.sh -f tsim/parser/create_mt.sim @@ -112,7 +131,6 @@ rem ./test.sh -f tsim/db/alter_replica_13.sim ./test.sh -f tsim/parser/fourArithmetic-basic.sim ./test.sh -f tsim/parser/function.sim ./test.sh -f tsim/parser/groupby-basic.sim -./test.sh -f tsim/parser/groupby.sim ./test.sh -f tsim/parser/having_child.sim ./test.sh -f tsim/parser/having.sim ./test.sh -f tsim/parser/import_commit1.sim @@ -122,7 +140,6 @@ rem ./test.sh -f tsim/db/alter_replica_13.sim ./test.sh -f tsim/parser/import.sim ./test.sh -f tsim/parser/insert_multiTbl.sim ./test.sh -f tsim/parser/insert_tb.sim -./test.sh -f tsim/parser/join_manyblocks.sim ./test.sh -f tsim/parser/join_multitables.sim ./test.sh -f tsim/parser/join_multivnode.sim ./test.sh -f tsim/parser/join.sim @@ -132,10 +149,8 @@ rem ./test.sh -f tsim/db/alter_replica_13.sim ./test.sh -f tsim/parser/lastrow2.sim ./test.sh -f tsim/parser/like.sim ./test.sh -f tsim/parser/limit.sim -./test.sh -f tsim/parser/limit1.sim ./test.sh -f tsim/parser/mixed_blocks.sim ./test.sh -f tsim/parser/nchar.sim -./test.sh -f tsim/parser/nestquery.sim ./test.sh -f tsim/parser/null_char.sim ./test.sh -f tsim/parser/precision_ns.sim ./test.sh -f tsim/parser/projection_limit_offset.sim @@ -148,7 +163,6 @@ rem ./test.sh -f tsim/db/alter_replica_13.sim ./test.sh -f tsim/parser/selectResNum.sim ./test.sh -f tsim/parser/set_tag_vals.sim ./test.sh -f tsim/parser/single_row_in_tb.sim -./test.sh -f tsim/parser/sliding.sim ./test.sh -f tsim/parser/slimit_alter_tags.sim ./test.sh -f tsim/parser/slimit.sim ./test.sh -f tsim/parser/slimit1.sim @@ -159,9 +173,9 @@ rem ./test.sh -f tsim/db/alter_replica_13.sim ./test.sh -f tsim/parser/timestamp.sim ./test.sh -f tsim/parser/top_groupby.sim ./test.sh -f tsim/parser/topbot.sim -./test.sh -f tsim/parser/union.sim ./test.sh -f tsim/parser/union_sysinfo.sim -./test.sh -f tsim/parser/where.sim +./test.sh -f tsim/parser/slimit_limit.sim +./test.sh -f tsim/parser/table_merge_limit.sim ./test.sh -f tsim/query/tagLikeFilter.sim ./test.sh -f tsim/query/charScalarFunction.sim ./test.sh -f tsim/query/explain.sim @@ -170,12 +184,21 @@ rem ./test.sh -f tsim/db/alter_replica_13.sim ./test.sh -f tsim/query/scalarFunction.sim ./test.sh -f tsim/query/scalarNull.sim ./test.sh -f tsim/query/session.sim +./test.sh -f tsim/query/join_interval.sim +./test.sh -f tsim/query/unionall_as_table.sim +./test.sh -f tsim/query/multi_order_by.sim ./test.sh -f tsim/query/sys_tbname.sim ./test.sh -f tsim/query/groupby.sim +./test.sh -f tsim/query/groupby_distinct.sim ./test.sh -f tsim/query/event.sim ./test.sh -f tsim/query/forceFill.sim ./test.sh -f tsim/query/emptyTsRange.sim +./test.sh -f tsim/query/emptyTsRange_scl.sim ./test.sh -f tsim/query/partitionby.sim +./test.sh -f tsim/query/tableCount.sim +./test.sh -f tsim/query/tag_scan.sim +./test.sh -f tsim/query/nullColSma.sim +./test.sh -f tsim/query/bug3398.sim ./test.sh -f tsim/qnode/basic1.sim ./test.sh -f tsim/snode/basic1.sim ./test.sh -f tsim/mnode/basic1.sim @@ -258,7 +281,6 @@ rem ./test.sh -f tsim/db/alter_replica_13.sim ./test.sh -f tsim/stream/udTableAndTag2.sim ./test.sh -f tsim/stream/windowClose.sim ./test.sh -f tsim/trans/lossdata1.sim -./test.sh -f tsim/trans/create_db.sim ./test.sh -f tsim/tmq/basic1.sim ./test.sh -f 
tsim/tmq/basic2.sim ./test.sh -f tsim/tmq/basic3.sim @@ -267,7 +289,6 @@ rem ./test.sh -f tsim/db/alter_replica_13.sim ./test.sh -f tsim/tmq/basic2Of2Cons.sim ./test.sh -f tsim/tmq/basic3Of2Cons.sim ./test.sh -f tsim/tmq/basic4Of2Cons.sim -./test.sh -f tsim/tmq/basic2Of2ConsOverlap.sim ./test.sh -f tsim/tmq/topic.sim ./test.sh -f tsim/tmq/snapshot.sim ./test.sh -f tsim/tmq/snapshot1.sim @@ -291,7 +312,6 @@ rem ./test.sh -f tsim/db/alter_replica_13.sim ./test.sh -f tsim/stable/tag_rename.sim ./test.sh -f tsim/stable/values.sim ./test.sh -f tsim/stable/vnode3.sim -./test.sh -f tsim/stable/metrics_idx.sim ./test.sh -f tsim/sma/drop_sma.sim ./test.sh -f tsim/sma/sma_leak.sim ./test.sh -f tsim/sma/tsmaCreateInsertQuery.sim @@ -302,13 +322,9 @@ rem ./test.sh -f tsim/db/alter_replica_13.sim ./test.sh -f tsim/valgrind/checkError3.sim ./test.sh -f tsim/valgrind/checkError4.sim ./test.sh -f tsim/valgrind/checkError5.sim -./test.sh -f tsim/valgrind/checkError6.sim -./test.sh -f tsim/valgrind/checkError7.sim ./test.sh -f tsim/valgrind/checkError8.sim ./test.sh -f tsim/vnode/replica3_basic.sim -./test.sh -f tsim/vnode/replica3_repeat.sim ./test.sh -f tsim/vnode/replica3_vgroup.sim -./test.sh -f tsim/vnode/replica3_many.sim ./test.sh -f tsim/vnode/replica3_import.sim ./test.sh -f tsim/vnode/stable_balance_replica1.sim ./test.sh -f tsim/vnode/stable_dnode2_stop.sim @@ -316,8 +332,6 @@ rem ./test.sh -f tsim/db/alter_replica_13.sim ./test.sh -f tsim/vnode/stable_dnode3.sim ./test.sh -f tsim/vnode/stable_replica3_dnode6.sim ./test.sh -f tsim/vnode/stable_replica3_vnode3.sim -./test.sh -f tsim/sync/3Replica1VgElect.sim -./test.sh -f tsim/sync/3Replica5VgElect.sim ./test.sh -f tsim/sync/oneReplica1VgElect.sim ./test.sh -f tsim/sync/oneReplica5VgElect.sim ./test.sh -f tsim/catalog/alterInCurrent.sim diff --git a/tests/system-test/0-others/information_schema.py b/tests/system-test/0-others/information_schema.py index 8245407ade6415bfc198e27107f0b7b54ca0e79f..762361f0511d0a19fcadeffca40de45687694f39 100644 --- a/tests/system-test/0-others/information_schema.py +++ b/tests/system-test/0-others/information_schema.py @@ -132,8 +132,61 @@ class TDTestCase: else: tdSql.checkEqual(result[i][0],f'stb_{i-1}') tdSql.checkEqual(result[i][1],ctbnum) + + def ins_stable_check2(self): + tdSql.execute('drop database if exists restful_test') + tdSql.execute('drop database if exists log') + tdSql.execute('drop database if exists d0') + tdSql.execute('drop database if exists d1') + tdSql.execute('create database restful_test vgroups 4 replica 1') + tdSql.execute('create database log vgroups 2 replica 1') + tdSql.execute('create database d0 vgroups 4 replica 1') + tdSql.execute('create database d1 vgroups 4 replica 1') + log_stb_num = 5 + rest_stb_num = 51 + for i in range(rest_stb_num): + tdSql.execute(f'create stable restful_test._stb_{i} (ts timestamp,c0 int) tags(t0 int);') + tdSql.execute(f'create stable d0._stb_{i} (ts timestamp,c0 int, c1 int) tags(t0 int,t1 int);') + tdSql.execute(f'create stable d1._stb_{i} (ts timestamp,c0 int, c1 int, c2 int) tags(t0 int,t1 int, t2 int);') + tdSql.execute(f'CREATE STABLE log.`taosadapter_restful_http_request_summary_milliseconds` (`_ts` TIMESTAMP, `sum` DOUBLE) TAGS (`request_uri` NCHAR(128));') + tdSql.execute(f'CREATE STABLE log.`taosadapter_system_cpu_percent` (`_ts` TIMESTAMP, `gauge` DOUBLE) TAGS (`endpoint` NCHAR(45));') + tdSql.execute(f'CREATE STABLE log.`taosadapter_restful_http_request_total` (`_ts` TIMESTAMP, `gauge` DOUBLE) TAGS (`client_ip` NCHAR(40));') + 
tdSql.execute(f'CREATE STABLE log.`taosadapter_system_mem_percent` (`_ts` TIMESTAMP, `gauge` DOUBLE) TAGS (`endpoint` NCHAR(45));') + tdSql.execute(f'CREATE STABLE log.`taosadapter_restful_http_request_fail` (`_ts` TIMESTAMP, `gauge` DOUBLE) TAGS (`request_uri` NCHAR(128), `status_code` NCHAR(4));') + + tdSql.query(f'select * from information_schema.ins_stables where db_name="restful_test" limit 0,25;') # condition 1 + result = tdSql.queryResult + tdSql.checkEqual(len(result),25) + for i in range(len(result)): + tdSql.checkEqual(result[i][0][0:5],f'_stb_') # stable_name + tdSql.checkEqual(result[i][1],f'restful_test') # db_name + tdSql.checkEqual(result[i][5]>=result[i][2],True) # last_update >= create_time + tdSql.checkEqual(result[i][3]>1,True) # columns + tdSql.checkEqual(result[i][4]>0,True) # tags + tdSql.checkEqual(result[i][6],None) # table_comment + tdSql.checkEqual(result[i][7],f'5000a,5000a') # watermark + tdSql.checkEqual(result[i][8],f'-1a,-1a') # max_delay + tdSql.checkEqual(result[i][9],f'') # rollup + tdSql.query(f'select create_time from information_schema.ins_stables where db_name="restful_test" order by create_time asc limit 10,1') + result = tdSql.queryResult + tdSql.checkEqual(len(result),1) + _create_time=result[0][0] + tdSql.query("select * from information_schema.ins_stables where db_name='restful_test' and create_time > '%s' limit 10,30" % (_create_time)) # condition 2 + result = tdSql.queryResult + tdSql.checkEqual(len(result),30) + for i in range(len(result)): + tdSql.checkEqual(result[i][0][0:5],f'_stb_') # stable_name + tdSql.checkEqual(result[i][1],f'restful_test') # db_name + tdSql.checkEqual(result[i][5]>=result[i][2],True) # last_update >= create_time + tdSql.checkEqual(result[i][2]>_create_time,True) # create_time + tdSql.checkEqual(result[i][3]>1,True) # columns + tdSql.checkEqual(result[i][4]>0,True) # tags + tdSql.checkEqual(result[i][6],None) # table_comment + tdSql.checkEqual(result[i][7],f'5000a,5000a') # watermark + tdSql.checkEqual(result[i][8],f'-1a,-1a') # max_delay + tdSql.checkEqual(result[i][9],f'') # rollup def ins_columns_check(self): tdSql.execute('drop database if exists db2') @@ -216,6 +269,7 @@ class TDTestCase: self.ins_columns_check() # self.ins_col_check_4096() self.ins_stable_check() + self.ins_stable_check2() self.ins_dnodes_check() @@ -224,4 +278,4 @@ class TDTestCase: tdLog.success("%s successfully executed" % __file__) tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/Timediff.py b/tests/system-test/2-query/Timediff.py index 4e72c07b303499145c24cec63138367d54750a34..a7366a4007135d528741ced696705925fc69a70f 100644 --- a/tests/system-test/2-query/Timediff.py +++ b/tests/system-test/2-query/Timediff.py @@ -4,6 +4,8 @@ from util.cases import * from util.gettime import * class TDTestCase: + updatecfgDict = {'keepColumnName': 1} + def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") @@ -27,14 +29,14 @@ class TDTestCase: self.ctbname = f'{self.dbname}.ctb' self.subtractor = 1 # unit:s def check_tbtype(self,tb_type): - if tb_type.lower() == 'ntb': + if tb_type.lower() == 'ntb': tdSql.query(f'select timediff(ts,{self.subtractor}) from {self.ntbname}') elif tb_type.lower() == 'ctb': tdSql.query(f'select timediff(ts,{self.subtractor}) from {self.ctbname}') elif tb_type.lower() == 'stb': tdSql.query(f'select 
timediff(ts,{self.subtractor}) from {self.stbname}') def check_tb_type(self,unit,tb_type): - if tb_type.lower() == 'ntb': + if tb_type.lower() == 'ntb': tdSql.query(f'select timediff(ts,{self.subtractor},{unit}) from {self.ntbname}') elif tb_type.lower() == 'ctb': tdSql.query(f'select timediff(ts,{self.subtractor},{unit}) from {self.ctbname}') @@ -43,7 +45,7 @@ class TDTestCase: def data_check(self,date_time,precision,tb_type): for unit in self.time_unit: if (unit.lower() == '1u' and precision.lower() == 'ms') or (unit.lower() == '1b' and precision.lower() == 'us') or (unit.lower() == '1b' and precision.lower() == 'ms'): - if tb_type.lower() == 'ntb': + if tb_type.lower() == 'ntb': tdSql.error(f'select timediff(ts,{self.subtractor},{unit}) from {self.ntbname}') elif tb_type.lower() == 'ctb': tdSql.error(f'select timediff(ts,{self.subtractor},{unit}) from {self.ctbname}') @@ -66,7 +68,7 @@ class TDTestCase: tdSql.checkEqual(tdSql.queryResult[i][0],int(((date_time[i]/1000)-self.subtractor)/60/60)) elif unit.lower() == '1d': for i in range(len(self.ts_str)): - tdSql.checkEqual(tdSql.queryResult[i][0],int(((date_time[i]/1000)-self.subtractor)/60/60/24)) + tdSql.checkEqual(tdSql.queryResult[i][0],int(((date_time[i]/1000)-self.subtractor)/60/60/24)) elif unit.lower() == '1w': for i in range(len(self.ts_str)): tdSql.checkEqual(tdSql.queryResult[i][0],int(((date_time[i]/1000)-self.subtractor)/60/60/24/7)) @@ -97,7 +99,7 @@ class TDTestCase: tdSql.checkEqual(tdSql.queryResult[i][0],int(((date_time[i]/1000)-self.subtractor*1000))) elif unit.lower() == '1u': for i in range(len(self.ts_str)): - tdSql.checkEqual(tdSql.queryResult[i][0],int(((date_time[i])-self.subtractor*1000000))) + tdSql.checkEqual(tdSql.queryResult[i][0],int(((date_time[i])-self.subtractor*1000000))) self.check_tbtype(tb_type) tdSql.checkRows(len(self.ts_str)) for i in range(len(self.ts_str)): @@ -185,8 +187,16 @@ class TDTestCase: elif precision.lower() == 'ns': for i in range(len(self.ts_str)): tdSql.checkEqual(tdSql.queryResult[i][0],int(((date_time[i])-self.subtractor*1000000000))) - + def function_multi_res_param(self): + tdSql.execute(f'drop database if exists {self.dbname}') + tdSql.execute(f'create database {self.dbname}') + tdSql.execute(f'use {self.dbname}') + tdSql.execute(f'create table {self.ntbname} (ts timestamp,c0 int)') + tdSql.execute(f'insert into {self.ntbname} values("2023-01-01 00:00:00",1)') + tdSql.execute(f'insert into {self.ntbname} values("2023-01-01 00:01:00",2)') + tdSql.query(f'select timediff(last(ts), first(ts)) from {self.ntbname}') + tdSql.checkData(0, 0, 60000) @@ -194,7 +204,8 @@ class TDTestCase: self.function_check_ntb() self.function_check_stb() self.function_without_param() - + self.function_multi_res_param() + def stop(self): tdSql.close() tdLog.success(f"{__file__} successfully executed") diff --git a/tests/system-test/2-query/systable_func.py b/tests/system-test/2-query/systable_func.py new file mode 100644 index 0000000000000000000000000000000000000000..3df641c72e15c4a3109a97f44886758e7a6dd247 --- /dev/null +++ b/tests/system-test/2-query/systable_func.py @@ -0,0 +1,60 @@ +import taos +import sys + +from util.log import * +from util.sql import * +from util.cases import * + + + +class TDTestCase: + + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug(f"start to excute {__file__}") + #tdSql.init(conn.cursor()) + tdSql.init(conn.cursor(), logSql) # output sql.txt file + + def run(self): + tdSql.prepare() + + tdSql.query(f"select count(`columns`) from 
`information_schema`.`ins_tables`;") + tdSql.query(f"select sum(`columns`) from `information_schema`.`ins_tables`;") + tdSql.query(f"select min(`columns`) from `information_schema`.`ins_tables`;") + tdSql.query(f"select max(`columns`) from `information_schema`.`ins_tables`;") + tdSql.query(f"select stddev(`columns`) from `information_schema`.`ins_tables`;") + tdSql.query(f"select avg(`columns`) from `information_schema`.`ins_tables`;") + tdSql.query(f"select apercentile(`columns`, 50) from `information_schema`.`ins_tables`;") + tdSql.query(f"select top(`columns`, 3) from `information_schema`.`ins_tables`;") + tdSql.query(f"select bottom(`columns`, 3) from `information_schema`.`ins_tables`;") + tdSql.query(f"select spread(`columns`) from `information_schema`.`ins_tables`;") + tdSql.query(f"select histogram(`columns`, 'user_input', '[1, 3, 5]', 0) from `information_schema`.`ins_tables`;") + tdSql.query(f"select hyperloglog(`columns`) from `information_schema`.`ins_tables`;") + tdSql.query(f"select sample(`columns`, 3) from `information_schema`.`ins_tables`;") + tdSql.query(f"select mode(`columns`) from `information_schema`.`ins_tables`;") + + tdSql.error(f"select unique(`columns`) from `information_schema`.`ins_tables`;") + tdSql.error(f"select tail(`columns`, 3) from `information_schema`.`ins_tables`;") + tdSql.error(f"select leastsquares(`columns`, 1, 1) from `information_schema`.`ins_tables`;") + tdSql.error(f"select elapsed(`columns`) from `information_schema`.`ins_tables`;") + tdSql.error(f"select interp(`columns`) from `information_schema`.`ins_tables` range(0, 1) every(1s) fill(null);") + tdSql.error(f"select percentile(`columns`, 50) from `information_schema`.`ins_tables`;") + tdSql.error(f"select derivative(`columns`, 1s, 0) from `information_schema`.`ins_tables`;") + tdSql.error(f"select irate(`columns`) from `information_schema`.`ins_tables`;") + tdSql.error(f"select last_row(`columns`) from `information_schema`.`ins_tables`;") + tdSql.error(f"select last(`columns`) from `information_schema`.`ins_tables`;") + tdSql.error(f"select first(`columns`) from `information_schema`.`ins_tables`;") + tdSql.error(f"select twa(`columns`) from `information_schema`.`ins_tables`;") + tdSql.error(f"select diff(`columns`) from `information_schema`.`ins_tables`;") + tdSql.error(f"select statecount(`columns`, 'GE', 0) from `information_schema`.`ins_tables`;") + tdSql.error(f"select stateduration(`columns`, 'GE', 0, 1s) from `information_schema`.`ins_tables`;") + tdSql.error(f"select csum(`columns`) from `information_schema`.`ins_tables`;") + tdSql.error(f"select mavg(`columns`, 1) from `information_schema`.`ins_tables`;") + + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/6-cluster/5dnode3mnodeRecreateMnode.py b/tests/system-test/6-cluster/5dnode3mnodeRecreateMnode.py index 41082baa3da6161d378e66923a0473d3e2c1951f..d75cd4923c2266cec1423cc1e54efd7796931442 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeRecreateMnode.py +++ b/tests/system-test/6-cluster/5dnode3mnodeRecreateMnode.py @@ -136,7 +136,7 @@ class TDTestCase: tdLog.info("Confirm the status of the dnode again") tdSql.error("create mnode on dnode 2") tdSql.query("select * from information_schema.ins_dnodes;") - print(tdSql.queryResult) + # print(tdSql.queryResult) clusterComCheck.checkDnodes(dnodeNumbers) # recreate mnode @@ -160,80 +160,38 @@ class TDTestCase: if 
tdSql.queryResult[i][0] == "%s"%username : tdLog.info("create user:%s successfully"%username) - # # create database and stable - # clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica']) - # tdLog.info("Take turns stopping Mnodes ") - - # tdDnodes=cluster.dnodes - # stopcount =0 - # threads=[] - - # # create stable:stb_0 - # stableName= paraDict['stbName'] - # newTdSql=tdCom.newTdSql() - # clusterComCreate.create_stables(newTdSql, paraDict["dbName"],stableName,paraDict['stbNumbers']) - # #create child table:ctb_0 - # for i in range(paraDict['stbNumbers']): - # stableName= '%s_%d'%(paraDict['stbName'],i) - # newTdSql=tdCom.newTdSql() - # clusterComCreate.create_ctable(newTdSql, paraDict["dbName"],stableName,stableName, paraDict['ctbNum']) - # #insert date - # for i in range(paraDict['stbNumbers']): - # stableName= '%s_%d'%(paraDict['stbName'],i) - # newTdSql=tdCom.newTdSql() - # threads.append(threading.Thread(target=clusterComCreate.insert_data, args=(newTdSql, paraDict["dbName"],stableName,paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"]))) - # for tr in threads: - # tr.start() - # for tr in threads: - # tr.join() - - # while stopcount < restartNumbers: - # tdLog.info(" restart loop: %d"%stopcount ) - # if stopRole == "mnode": - # for i in range(mnodeNums): - # tdDnodes[i].stoptaosd() - # # sleep(10) - # tdDnodes[i].starttaosd() - # # sleep(10) - # elif stopRole == "vnode": - # for i in range(vnodeNumbers): - # tdDnodes[i+mnodeNums].stoptaosd() - # # sleep(10) - # tdDnodes[i+mnodeNums].starttaosd() - # # sleep(10) - # elif stopRole == "dnode": - # for i in range(dnodeNumbers): - # tdDnodes[i].stoptaosd() - # # sleep(10) - # tdDnodes[i].starttaosd() - # # sleep(10) - - # # dnodeNumbers don't include database of schema - # if clusterComCheck.checkDnodes(dnodeNumbers): - # tdLog.info("dnode is ready") - # else: - # print("dnodes is not ready") - # self.stopThread(threads) - # tdLog.exit("one or more of dnodes failed to start ") - # # self.check3mnode() - # stopcount+=1 - - - # clusterComCheck.checkDnodes(dnodeNumbers) - # clusterComCheck.checkDbRows(dbNumbers) - # # clusterComCheck.checkDb(dbNumbers,1,paraDict["dbName"]) - - # tdSql.execute("use %s" %(paraDict["dbName"])) - # tdSql.query("show stables") - # tdSql.checkRows(paraDict["stbNumbers"]) - # # for i in range(paraDict['stbNumbers']): - # # stableName= '%s_%d'%(paraDict['stbName'],i) - # # tdSql.query("select * from %s"%stableName) - # # tdSql.checkRows(rowsPerStb) + """ case for TS-3524 and test 'taos -h' """ + bPath = self.getBuildPath() + for i in range(6): + nodePort = 6030 + i*100 + newTdSql=tdCom.newTdSql(port=nodePort) + + dataPath = tdDnodes[1].dataDir + os.system(f"rm -rf {dataPath}/*") + os.system(f"rm -rf {dataPath}/.runing") + + tdDnodes[1].stoptaosd() + tdDnodes[1].starttaosd() + sleep(5) + for i in range(6): + nodePort = 6030 + i*100 + newTdSql=tdCom.newTdSql(port=nodePort) + + dataPath = tdDnodes[0].dataDir + os.system(f"rm -rf {dataPath}/*") + os.system(f"rm -rf {dataPath}/.runing") + + tdDnodes[0].stoptaosd() + tdDnodes[0].starttaosd() + sleep(5) + for i in range(6): + nodePort = 6030 + i*100 + newTdSql=tdCom.newTdSql(port=nodePort) + def run(self): # print(self.master_dnode.cfgDict) - self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=1,stopRole='dnode') + self.fiveDnodeThreeMnode(dnodeNumbers=6,mnodeNums=3,restartNumbers=1,stopRole='dnode') def stop(self): tdSql.close() diff --git 
a/tests/system-test/7-tmq/tmqClientConsLog.py b/tests/system-test/7-tmq/tmqClientConsLog.py new file mode 100644 index 0000000000000000000000000000000000000000..a56bdecb5895481ca5e6460a376e1157747f3798 --- /dev/null +++ b/tests/system-test/7-tmq/tmqClientConsLog.py @@ -0,0 +1,228 @@ + +import taos +import sys +import time +import socket +import os +import threading +import math + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +sys.path.append("./7-tmq") +from tmqCommon import * + +class TDTestCase: + def __init__(self): + self.vgroups = 3 + self.ctbNum = 10 + self.rowsPerTbl = 1000 + + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), False) + + def prepareTestEnv(self): + tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 2, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 1000, + 'batchNum': 100, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 10, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 0} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + tmqCom.initConsumerTable() + tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=self.replicaVar) + tdSql.execute("alter database %s wal_retention_period 3600" % (paraDict['dbName'])) + tdLog.info("create stb") + tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) + tdLog.info("create ctb") + tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'], + ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx']) + tdLog.info("insert data") + tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], + ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], + startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + + # tdLog.info("restart taosd to ensure that the data falls into the disk") + # tdDnodes.stop(1) + # tdDnodes.start(1) + # tdSql.query("flush database %s"%(paraDict['dbName'])) + return + + def updateRowsOfConsumer(self, consumerDict, consumerId, totalRowsOfConsumer): + for key in consumerDict: + if key == consumerId: + consumerDict[key] = totalRowsOfConsumer + return + + consumerDict[consumerId] = totalRowsOfConsumer + return + + def checkClientLog(self, actConsumeTotalRows, numOfConsumer): + # 01931245 TSC consumer:0x5ee20f124420000c process poll rsp, vgId:5, offset:log:3399, blocks:2, rows:6000 vg total:330000 total:654000, reqId:0xa77d2245ae20112 + # 01931245 TSC consumer:0x5ee20f124420000c process poll rsp, vgId:7, offset:log:3384, blocks:1, rows:2000 vg total:326000 total:656000, reqId:0xa77d2245b050113 + # 01931246 TSC 
consumer:0x5ee20f124420000d process poll rsp, vgId:6, offset:log:3400, blocks:2, rows:6000 vg total:330000 total:330000, reqId:0xa77d2245b380116 + # 01931246 TSC consumer:0x5ee20f124420000d process poll rsp, vgId:6, offset:log:3460, blocks:2, rows:6000 vg total:336000 total:336000, reqId:0xa77d2245b8f011a + # 01931246 TSC consumer:0x5ee20f124420000d process poll rsp, vgId:6, offset:log:3520, blocks:2, rows:6000 vg total:342000 total:342000, reqId:0xa77d2245beb011f + # 01931246 TSC consumer:0x5ee20f124420000d process poll rsp, vgId:6, offset:log:3567, blocks:1, rows:2000 vg total:344000 total:344000, reqId:0xa77d2245c430121 + # filter key: process poll rsp, vgId + + tdLog.printNoPrefix("======== start filter key info from client log file") + + cfgPath = tdCom.getClientCfgPath() + taosLogFile = '%s/../log/taoslog*'%(cfgPath) + filterResultFile = '%s/../log/filter'%(cfgPath) + cmdStr = 'grep "process poll rsp, vgId:" %s >> %s'%(taosLogFile, filterResultFile) + tdLog.info(cmdStr) + os.system(cmdStr) + + consumerDict = {} + for index, line in enumerate(open(filterResultFile,'r')): + # tdLog.info("row[%d]: %s"%(index, line)) + valueList = line.split(',') + # for i in range(len(valueList)): + # tdLog.info("index[%d]: %s"%(i, valueList[i])) + # get consumer id + list2 = valueList[0].split(':') + list3 = list2[4].split() + consumerId = list3[0] + print("consumerId: %s"%(consumerId)) + + # # get vg id + # list2 = valueList[1].split(':') + # vgId = list2[1] + # print("vgId: %s"%(vgId)) + + # get total rows of a certain consuer + list2 = valueList[6].split(':') + totalRowsOfConsumer = list2[1] + print("totalRowsOfConsumer: %s"%(totalRowsOfConsumer)) + + # update a certain info + self.updateRowsOfConsumer(consumerDict, consumerId, totalRowsOfConsumer) + + # print(consumerDict) + if numOfConsumer != len(consumerDict): + tdLog.info("expect consumer num: %d, act consumer num: %d"%(numOfConsumer, len(consumerDict))) + tdLog.exit("act consumer error!") + + # total rows of all consumers + totalRows = 0 + for key in consumerDict: + totalRows += int(consumerDict[key]) + + if totalRows < actConsumeTotalRows: + tdLog.info("expect consume total rows: %d, act consume total rows: %d"%(actConsumeTotalRows, totalRows)) + tdLog.exit("act consume rows error!") + return + + def tmqCase1(self): + tdLog.printNoPrefix("======== test case 1: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 2, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 1000, + 'batchNum': 100, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 10, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 0} + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNameList = ['topic1'] + expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from stb with filter") + queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) + sqlString = "create 
topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + totalRowsInserted = expectRowsList[0] + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 0 + expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] + topicList = topicNameList[0] + ifcheckdata = 0 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:500, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + consumerId = 1 + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 0") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 2 + resultList = tmqCom.selectConsumeResult(expectRows) + actConsumeTotalRows = resultList[0] + resultList[1] + + tdLog.info("two consumers poll rows: %d, %d"%(resultList[0], resultList[1])) + + tdLog.info("the consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted)) + if not (totalRowsInserted <= actConsumeTotalRows): + tdLog.exit("%d tmq consume rows error!"%consumerId) + + self.checkClientLog(actConsumeTotalRows, 2) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 1 end ...... ") + + def run(self): + tdSql.prepare() + self.prepareTestEnv() + self.tmqCase1() + + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/win-test-file b/tests/system-test/win-test-file index 96ffa63707494139a156b1d8b8a276bd7cf03a5b..24972d388b67d2cf4390cebe1f7d5a61a3438f0e 100644 --- a/tests/system-test/win-test-file +++ b/tests/system-test/win-test-file @@ -1,4 +1,115 @@ +python3 ./test.py -f 2-query/nestedQuery.py +python3 ./test.py -f 2-query/nestedQuery_str.py +python3 ./test.py -f 2-query/nestedQuery_math.py +python3 ./test.py -f 2-query/nestedQuery_time.py +python3 ./test.py -f 2-query/nestedQuery_26.py +python3 ./test.py -f 2-query/nestedQuery_str.py -Q 2 +python3 ./test.py -f 2-query/nestedQuery_math.py -Q 2 +python3 ./test.py -f 2-query/nestedQuery_time.py -Q 2 +python3 ./test.py -f 2-query/nestedQuery.py -Q 2 +python3 ./test.py -f 2-query/nestedQuery_26.py -Q 2 +python3 ./test.py -f 2-query/columnLenUpdated.py +python3 ./test.py -f 2-query/columnLenUpdated.py -Q 2 +python3 ./test.py -f 2-query/columnLenUpdated.py -Q 3 +python3 ./test.py -f 2-query/columnLenUpdated.py -Q 4 +python3 ./test.py -f 2-query/nestedQuery.py -Q 4 +python3 ./test.py -f 2-query/nestedQuery_str.py -Q 4 +python3 ./test.py -f 2-query/nestedQuery_math.py -Q 4 +python3 ./test.py -f 2-query/nestedQuery_time.py -Q 4 +python3 ./test.py -f 2-query/nestedQuery_26.py -Q 4 +python3 ./test.py -f 7-tmq/tmqShow.py +python3 ./test.py -f 7-tmq/tmqDropStb.py +python3 ./test.py -f 7-tmq/subscribeStb0.py +python3 ./test.py -f 7-tmq/subscribeStb1.py +python3 ./test.py -f 7-tmq/subscribeStb2.py +python3 ./test.py -f 7-tmq/subscribeStb3.py 
+python3 ./test.py -f 7-tmq/subscribeDb0.py -N 3 -n 3 +python3 ./test.py -f 7-tmq/ins_topics_test.py +python3 ./test.py -f 1-insert/delete_stable.py +python3 ./test.py -f 2-query/out_of_order.py -Q 3 +python3 ./test.py -f 2-query/out_of_order.py +python3 ./test.py -f 2-query/insert_null_none.py +python3 ./test.py -f 2-query/insert_null_none.py -R +python3 ./test.py -f 2-query/insert_null_none.py -Q 2 +python3 ./test.py -f 2-query/insert_null_none.py -Q 3 +python3 ./test.py -f 2-query/insert_null_none.py -Q 4 python3 ./test.py -f 1-insert/database_pre_suf.py +python3 ./test.py -f 2-query/concat.py -Q 3 +python3 ./test.py -f 2-query/out_of_order.py -Q 2 +python3 ./test.py -f 2-query/out_of_order.py -Q 4 +python3 ./test.py -f 2-query/nestedQuery.py -Q 3 +python3 ./test.py -f 2-query/nestedQuery_str.py -Q 3 +python3 ./test.py -f 2-query/nestedQuery_math.py -Q 3 +python3 ./test.py -f 2-query/nestedQuery_time.py -Q 3 +python3 ./test.py -f 2-query/nestedQuery_26.py -Q 3 +python3 ./test.py -f 2-query/select_null.py +python3 ./test.py -f 2-query/select_null.py -R +python3 ./test.py -f 2-query/select_null.py -Q 2 +python3 ./test.py -f 2-query/select_null.py -Q 3 +python3 ./test.py -f 2-query/select_null.py -Q 4 +python3 ./test.py -f 2-query/slimit.py +python3 ./test.py -f 2-query/slimit.py -R +python3 ./test.py -f 2-query/slimit.py -Q 2 +python3 ./test.py -f 2-query/slimit.py -Q 3 +python3 ./test.py -f 2-query/slimit.py -Q 4 +python3 ./test.py -f 3-enterprise/restore/restoreDnode.py -N 5 -M 3 +python3 ./test.py -f 3-enterprise/restore/restoreVnode.py -N 5 -M 3 +python3 ./test.py -f 3-enterprise/restore/restoreMnode.py -N 5 -M 3 +python3 ./test.py -f 3-enterprise/restore/restoreQnode.py -N 5 -M 3 +python3 ./test.py -f 7-tmq/create_wrong_topic.py +python3 ./test.py -f 7-tmq/dropDbR3ConflictTransaction.py -N 3 +python3 ./test.py -f 7-tmq/basic5.py +python3 ./test.py -f 7-tmq/subscribeDb.py -N 3 -n 3 +python3 ./test.py -f 7-tmq/subscribeDb1.py +python3 ./test.py -f 7-tmq/subscribeDb2.py +python3 ./test.py -f 7-tmq/subscribeDb3.py +python3 ./test.py -f 7-tmq/subscribeDb4.py +python3 ./test.py -f 7-tmq/subscribeStb.py +python3 ./test.py -f 7-tmq/subscribeStb4.py +python3 ./test.py -f 7-tmq/db.py +python3 ./test.py -f 7-tmq/tmqError.py +python3 ./test.py -f 7-tmq/schema.py +python3 ./test.py -f 7-tmq/stbFilterWhere.py +python3 ./test.py -f 7-tmq/stbFilter.py +python3 ./test.py -f 7-tmq/tmqCheckData.py +python3 ./test.py -f 7-tmq/tmqCheckData1.py +python3 ./test.py -f 7-tmq/tmqConsumerGroup.py +python3 ./test.py -f 7-tmq/tmqAlterSchema.py +python3 ./test.py -f 7-tmq/tmqConsFromTsdb.py -N 3 -n 3 +python3 ./test.py -f 7-tmq/tmqConsFromTsdb1.py -N 3 -n 3 +python3 ./test.py -f 7-tmq/tmqConsFromTsdb-mutilVg.py +python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg.py +python3 ./test.py -f 7-tmq/tmqConsFromTsdb-1ctb.py +python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-1ctb.py +python3 ./test.py -f 7-tmq/tmqConsFromTsdb-1ctb-funcNFilter.py +python3 ./test.py -f 7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb-funcNFilter.py +python3 ./test.py -f 7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py +python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py +python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py +python3 ./test.py -f 7-tmq/tmqAutoCreateTbl.py +python3 ./test.py -f 7-tmq/tmqDnodeRestart.py +python3 ./test.py -f 7-tmq/tmqDnodeRestart1.py +python3 ./test.py -f 7-tmq/tmqUpdate-1ctb.py +python3 ./test.py -f 7-tmq/tmqUpdateWithConsume.py -N 3 -n 3 +python3 ./test.py -f 7-tmq/tmqUpdate-multiCtb-snapshot0.py +python3 
./test.py -f 7-tmq/tmqUpdate-multiCtb-snapshot1.py +python3 ./test.py -f 7-tmq/tmqDelete-1ctb.py +python3 ./test.py -f 7-tmq/tmqDelete-multiCtb.py -N 3 -n 3 +python3 ./test.py -f 7-tmq/tmqDropStbCtb.py +python3 ./test.py -f 7-tmq/tmqDropNtb-snapshot0.py +python3 ./test.py -f 7-tmq/tmqDropNtb-snapshot1.py +python3 ./test.py -f 7-tmq/stbTagFilter-1ctb.py +python3 ./test.py -f 7-tmq/dataFromTsdbNWal.py +python3 ./test.py -f 7-tmq/dataFromTsdbNWal-multiCtb.py +python3 ./test.py -f 7-tmq/tmq_taosx.py +python3 ./test.py -f 7-tmq/raw_block_interface_test.py +python3 ./test.py -f 7-tmq/stbTagFilter-multiCtb.py +python3 ./test.py -f 7-tmq/tmqSubscribeStb-r3.py -N 5 +python3 ./test.py -f 7-tmq/tmq3mnodeSwitch.py -N 6 -M 3 +python3 ./test.py -f 7-tmq/tmq3mnodeSwitch.py -N 6 -M 3 -n 3 +python3 ./test.py -f 99-TDcase/TD-19201.py +python3 ./test.py -f 99-TDcase/TD-21561.py +python3 ./test.py -f 99-TDcase/TS-3404.py python3 ./test.py -f 0-others/balance_vgroups_r1.py -N 6 python3 ./test.py -f 0-others/taosShell.py python3 ./test.py -f 0-others/taosShellError.py @@ -11,10 +122,14 @@ python3 ./test.py -f 0-others/cachemodel.py python3 ./test.py -f 0-others/sysinfo.py python3 ./test.py -f 0-others/user_control.py python3 ./test.py -f 0-others/user_manage.py +python3 ./test.py -f 0-others/user_privilege.py python3 ./test.py -f 0-others/fsync.py -#python3 ./test.py -f 0-others/compatibility.py +python3 ./test.py -f 0-others/multilevel.py +python3 ./test.py -f 0-others/compatibility.py python3 ./test.py -f 0-others/tag_index_basic.py -# python3 ./test.py -f 1-insert/alter_database.py +python3 ./test.py -N 3 -f 0-others/walRetention.py +python3 ./test.py -f 1-insert/alter_database.py +python3 ./test.py -f 1-insert/alter_replica.py -N 3 python3 ./test.py -f 1-insert/influxdb_line_taosc_insert.py python3 ./test.py -f 1-insert/opentsdb_telnet_line_taosc_insert.py python3 ./test.py -f 1-insert/opentsdb_json_taosc_insert.py @@ -25,15 +140,39 @@ python3 ./test.py -f 1-insert/alter_table.py python3 ./test.py -f 1-insert/boundary.py python3 ./test.py -f 1-insert/insertWithMoreVgroup.py python3 ./test.py -f 1-insert/table_comment.py -#python3 ./test.py -f 1-insert/time_range_wise.py -#python3 ./test.py -f 1-insert/block_wise.py -#python3 ./test.py -f 1-insert/create_retentions.py python3 ./test.py -f 1-insert/mutil_stage.py python3 ./test.py -f 1-insert/table_param_ttl.py python3 ./test.py -f 1-insert/table_param_ttl.py -R python3 ./test.py -f 1-insert/update_data_muti_rows.py python3 ./test.py -f 1-insert/db_tb_name_check.py python3 ./test.py -f 1-insert/InsertFuturets.py +python3 ./test.py -f 1-insert/insert_wide_column.py +python3 ./test.py -f 1-insert/rowlength64k_benchmark.py +python3 ./test.py -f 1-insert/rowlength64k.py +python3 ./test.py -f 1-insert/rowlength64k.py -R +python3 ./test.py -f 1-insert/rowlength64k.py -Q 2 +python3 ./test.py -f 1-insert/rowlength64k.py -Q 3 +python3 ./test.py -f 1-insert/rowlength64k.py -Q 4 +python3 ./test.py -f 1-insert/rowlength64k_1.py +python3 ./test.py -f 1-insert/rowlength64k_1.py -R +python3 ./test.py -f 1-insert/rowlength64k_1.py -Q 2 +python3 ./test.py -f 1-insert/rowlength64k_1.py -Q 3 +python3 ./test.py -f 1-insert/rowlength64k_1.py -Q 4 +python3 ./test.py -f 1-insert/rowlength64k_2.py +python3 ./test.py -f 1-insert/rowlength64k_2.py -R +python3 ./test.py -f 1-insert/rowlength64k_2.py -Q 2 +python3 ./test.py -f 1-insert/rowlength64k_2.py -Q 3 +python3 ./test.py -f 1-insert/rowlength64k_2.py -Q 4 +python3 ./test.py -f 1-insert/rowlength64k_3.py +python3 ./test.py -f 
1-insert/rowlength64k_3.py -R +python3 ./test.py -f 1-insert/rowlength64k_3.py -Q 2 +python3 ./test.py -f 1-insert/rowlength64k_3.py -Q 3 +python3 ./test.py -f 1-insert/rowlength64k_3.py -Q 4 +python3 ./test.py -f 1-insert/rowlength64k_4.py +python3 ./test.py -f 1-insert/rowlength64k_4.py -R +python3 ./test.py -f 1-insert/rowlength64k_4.py -Q 2 +python3 ./test.py -f 1-insert/rowlength64k_4.py -Q 3 +python3 ./test.py -f 1-insert/rowlength64k_4.py -Q 4 python3 ./test.py -f 0-others/show.py python3 ./test.py -f 0-others/information_schema.py python3 ./test.py -f 2-query/abs.py @@ -138,6 +277,7 @@ python3 ./test.py -f 2-query/mavg.py -R python3 ./test.py -f 2-query/max_partition.py python3 ./test.py -f 2-query/max_partition.py -R python3 ./test.py -f 2-query/max_min_last_interval.py +python3 ./test.py -f 2-query/last_row_interval.py python3 ./test.py -f 2-query/max.py python3 ./test.py -f 2-query/max.py -R python3 ./test.py -f 2-query/min.py @@ -160,6 +300,7 @@ python3 ./test.py -f 2-query/sample.py python3 ./test.py -f 2-query/sample.py -R python3 ./test.py -f 2-query/sin.py python3 ./test.py -f 2-query/sin.py -R +python3 ./test.py -f 2-query/smaBasic.py -N 3 python3 ./test.py -f 2-query/smaTest.py python3 ./test.py -f 2-query/smaTest.py -R python3 ./test.py -f 2-query/sml.py @@ -212,11 +353,13 @@ python3 ./test.py -f 2-query/case_when.py python3 ./test.py -f 2-query/case_when.py -R python3 ./test.py -f 2-query/blockSMA.py python3 ./test.py -f 2-query/blockSMA.py -R +python3 ./test.py -f 2-query/projectionDesc.py +python3 ./test.py -f 2-query/projectionDesc.py -R python3 ./test.py -f 1-insert/update_data.py python3 ./test.py -f 1-insert/tb_100w_data_order.py -python3 ./test.py -f 1-insert/delete_stable.py python3 ./test.py -f 1-insert/delete_childtable.py python3 ./test.py -f 1-insert/delete_normaltable.py +python3 ./test.py -f 1-insert/delete_systable.py python3 ./test.py -f 1-insert/keep_expired.py python3 ./test.py -f 1-insert/stmt_error.py python3 ./test.py -f 1-insert/drop.py @@ -225,17 +368,18 @@ python3 ./test.py -f 2-query/join2.py python3 ./test.py -f 2-query/union1.py python3 ./test.py -f 2-query/concat2.py python3 ./test.py -f 2-query/json_tag.py -python3 ./test.py -f 2-query/nestedQuery.py python3 ./test.py -f 2-query/nestedQueryInterval.py -python3 ./test.py -f 2-query/nestedQuery_str.py -python3 ./test.py -f 2-query/nestedQuery_math.py -python3 ./test.py -f 2-query/nestedQuery_time.py +python3 ./test.py -f 2-query/systable_func.py python3 ./test.py -f 2-query/stablity.py python3 ./test.py -f 2-query/stablity_1.py python3 ./test.py -f 2-query/elapsed.py python3 ./test.py -f 2-query/csum.py python3 ./test.py -f 2-query/function_diff.py python3 ./test.py -f 2-query/tagFilter.py +python3 ./test.py -f 2-query/projectionDesc.py +python3 ./test.py -f 2-query/ts_3398.py -N 3 -n 3 +python3 ./test.py -f 2-query/ts_3405.py -N 3 -n 3 +python3 ./test.py -f 2-query/ts_3423.py -N 3 -n 3 python3 ./test.py -f 2-query/queryQnode.py python3 ./test.py -f 6-cluster/5dnode1mnode.py python3 ./test.py -f 6-cluster/5dnode2mnode.py -N 5 @@ -247,7 +391,7 @@ python3 ./test.py -f 6-cluster/5dnode3mnodeStopLoop.py -N 5 -M 3 python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateDb.py -N 6 -M 3 python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateDb.py -N 6 -M 3 -n 3 python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py -N 6 -M 3 -#python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py -N 6 -M 3 -n 3 +python3 ./test.py -f 
6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py -N 6 -M 3 -n 3 python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateDb.py -N 6 -M 3 python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateDb.py -N 6 -M 3 -n 3 python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopDnodeModifyMeta.py -N 6 -M 3 @@ -260,6 +404,8 @@ python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateStb.py -N 6 - python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateStb.py -N 6 -M 3 -n 3 python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertData.py -N 6 -M 3 python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertData.py -N 6 -M 3 -n 3 +python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py -N 6 -M 3 +python3 ./test.py -f 6-cluster/manually-test/6dnode3mnodeInsertLessDataAlterRep3to1to3.py -N 6 -M 3 python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 7 -M 3 -C 6 python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 7 -M 3 -C 6 -n 3 python3 ./test.py -f 6-cluster/5dnode3mnodeRecreateMnode.py -N 5 -M 3 @@ -271,65 +417,6 @@ python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas_que python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas.py -N 4 -M 1 python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys.py -N 4 -M 1 python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups.py -N 4 -M 1 -python3 ./test.py -f 7-tmq/create_wrong_topic.py -python3 ./test.py -f 7-tmq/dropDbR3ConflictTransaction.py -N 3 -python3 ./test.py -f 7-tmq/basic5.py -python3 ./test.py -f 7-tmq/subscribeDb.py -N 3 -n 3 -python3 ./test.py -f 7-tmq/subscribeDb0.py -N 3 -n 3 -python3 ./test.py -f 7-tmq/subscribeDb1.py -python3 ./test.py -f 7-tmq/subscribeDb2.py -python3 ./test.py -f 7-tmq/subscribeDb3.py -python3 ./test.py -f 7-tmq/subscribeDb4.py -python3 ./test.py -f 7-tmq/subscribeStb.py -python3 ./test.py -f 7-tmq/subscribeStb0.py -python3 ./test.py -f 7-tmq/subscribeStb1.py -python3 ./test.py -f 7-tmq/subscribeStb2.py -python3 ./test.py -f 7-tmq/subscribeStb3.py -python3 ./test.py -f 7-tmq/subscribeStb4.py -python3 ./test.py -f 7-tmq/db.py -python3 ./test.py -f 7-tmq/tmqError.py -python3 ./test.py -f 7-tmq/schema.py -python3 ./test.py -f 7-tmq/stbFilter.py -python3 ./test.py -f 7-tmq/tmqCheckData.py -python3 ./test.py -f 7-tmq/tmqCheckData1.py -python3 ./test.py -f 7-tmq/tmqConsumerGroup.py -python3 ./test.py -f 7-tmq/tmqShow.py -python3 ./test.py -f 7-tmq/tmqAlterSchema.py -python3 ./test.py -f 7-tmq/tmqConsFromTsdb.py -N 3 -n 3 -python3 ./test.py -f 7-tmq/tmqConsFromTsdb1.py -N 3 -n 3 -python3 ./test.py -f 7-tmq/tmqConsFromTsdb-mutilVg.py -python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg.py -python3 ./test.py -f 7-tmq/tmqConsFromTsdb-1ctb.py -python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-1ctb.py -python3 ./test.py -f 7-tmq/tmqConsFromTsdb-1ctb-funcNFilter.py -python3 ./test.py -f 7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb-funcNFilter.py -python3 ./test.py -f 7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py -python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py -python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py -python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py -python3 ./test.py -f 7-tmq/tmqAutoCreateTbl.py -python3 ./test.py -f 7-tmq/tmqDnodeRestart.py -python3 ./test.py -f 7-tmq/tmqDnodeRestart1.py -python3 ./test.py -f 7-tmq/tmqUpdate-1ctb.py -python3 ./test.py -f 7-tmq/tmqUpdateWithConsume.py -N 3 -n 3 -python3 ./test.py -f 
7-tmq/tmqUpdate-multiCtb-snapshot0.py -python3 ./test.py -f 7-tmq/tmqUpdate-multiCtb-snapshot1.py -python3 ./test.py -f 7-tmq/tmqDelete-1ctb.py -python3 ./test.py -f 7-tmq/tmqDelete-multiCtb.py -N 3 -n 3 -python3 ./test.py -f 7-tmq/tmqDropStb.py -python3 ./test.py -f 7-tmq/tmqDropStbCtb.py -python3 ./test.py -f 7-tmq/tmqDropNtb-snapshot0.py -python3 ./test.py -f 7-tmq/tmqDropNtb-snapshot1.py -python3 ./test.py -f 7-tmq/stbTagFilter-1ctb.py -python3 ./test.py -f 7-tmq/dataFromTsdbNWal.py -python3 ./test.py -f 7-tmq/dataFromTsdbNWal-multiCtb.py -python3 ./test.py -f 7-tmq/tmq_taosx.py -python3 ./test.py -f 7-tmq/stbTagFilter-multiCtb.py -python3 ./test.py -f 99-TDcase/TD-19201.py -python3 ./test.py -f 99-TDcase/TD-21561.py -python3 ./test.py -f 7-tmq/tmqSubscribeStb-r3.py -N 5 -python3 ./test.py -f 7-tmq/tmq3mnodeSwitch.py -N 6 -M 3 -python3 ./test.py -f 7-tmq/tmq3mnodeSwitch.py -N 6 -M 3 -n 3 python3 ./test.py -f 2-query/between.py -Q 2 python3 ./test.py -f 2-query/distinct.py -Q 2 python3 ./test.py -f 2-query/varchar.py -Q 2 @@ -389,11 +476,7 @@ python3 ./test.py -f 2-query/arccos.py -Q 2 python3 ./test.py -f 2-query/arctan.py -Q 2 python3 ./test.py -f 2-query/query_cols_tags_and_or.py -Q 2 python3 ./test.py -f 2-query/interp.py -Q 2 -python3 ./test.py -f 2-query/nestedQuery.py -Q 2 python3 ./test.py -f 2-query/nestedQueryInterval.py -Q 2 -python3 ./test.py -f 2-query/nestedQuery_str.py -Q 2 -python3 ./test.py -f 2-query/nestedQuery_math.py -Q 2 -python3 ./test.py -f 2-query/nestedQuery_time.py -Q 2 python3 ./test.py -f 2-query/stablity.py -Q 2 python3 ./test.py -f 2-query/stablity_1.py -Q 2 python3 ./test.py -f 2-query/avg.py -Q 2 @@ -422,11 +505,13 @@ python3 ./test.py -f 2-query/function_null.py -Q 2 python3 ./test.py -f 2-query/count_partition.py -Q 2 python3 ./test.py -f 2-query/max_partition.py -Q 2 python3 ./test.py -f 2-query/max_min_last_interval.py -Q 2 +python3 ./test.py -f 2-query/last_row_interval.py -Q 2 python3 ./test.py -f 2-query/last_row.py -Q 2 python3 ./test.py -f 2-query/tsbsQuery.py -Q 2 python3 ./test.py -f 2-query/sml.py -Q 2 python3 ./test.py -f 2-query/case_when.py -Q 2 python3 ./test.py -f 2-query/blockSMA.py -Q 2 +python3 ./test.py -f 2-query/projectionDesc.py -Q 2 python3 ./test.py -f 99-TDcase/TD-21561.py -Q 2 python3 ./test.py -f 2-query/between.py -Q 3 python3 ./test.py -f 2-query/distinct.py -Q 3 @@ -443,7 +528,6 @@ python3 ./test.py -f 2-query/cast.py -Q 3 python3 ./test.py -f 2-query/substr.py -Q 3 python3 ./test.py -f 2-query/union.py -Q 3 python3 ./test.py -f 2-query/union1.py -Q 3 -python3 ./test.py -f 2-query/concat.py -Q 3 python3 ./test.py -f 2-query/concat2.py -Q 3 python3 ./test.py -f 2-query/concat_ws.py -Q 3 python3 ./test.py -f 2-query/concat_ws2.py -Q 3 @@ -515,12 +599,14 @@ python3 ./test.py -f 2-query/function_null.py -Q 3 python3 ./test.py -f 2-query/count_partition.py -Q 3 python3 ./test.py -f 2-query/max_partition.py -Q 3 python3 ./test.py -f 2-query/max_min_last_interval.py -Q 3 +python3 ./test.py -f 2-query/last_row_interval.py -Q 3 python3 ./test.py -f 2-query/last_row.py -Q 3 python3 ./test.py -f 2-query/tsbsQuery.py -Q 3 python3 ./test.py -f 2-query/sml.py -Q 3 python3 ./test.py -f 2-query/interp.py -Q 3 python3 ./test.py -f 2-query/case_when.py -Q 3 python3 ./test.py -f 2-query/blockSMA.py -Q 3 +python3 ./test.py -f 2-query/projectionDesc.py -Q 3 python3 ./test.py -f 99-TDcase/TD-21561.py -Q 3 python3 ./test.py -f 2-query/between.py -Q 4 python3 ./test.py -f 2-query/distinct.py -Q 4 @@ -560,6 +646,7 @@ python3 ./test.py -f 
2-query/To_unixtimestamp.py -Q 4 python3 ./test.py -f 2-query/timetruncate.py -Q 4 python3 ./test.py -f 2-query/diff.py -Q 4 python3 ./test.py -f 2-query/Timediff.py -Q 4 +python3 ./test.py -f 2-query/json_tag.py -Q 4 python3 ./test.py -f 2-query/top.py -Q 4 python3 ./test.py -f 2-query/bottom.py -Q 4 python3 ./test.py -f 2-query/percentile.py -Q 4 @@ -578,11 +665,9 @@ python3 ./test.py -f 2-query/arcsin.py -Q 4 python3 ./test.py -f 2-query/arccos.py -Q 4 python3 ./test.py -f 2-query/arctan.py -Q 4 python3 ./test.py -f 2-query/query_cols_tags_and_or.py -Q 4 -python3 ./test.py -f 2-query/nestedQuery.py -Q 4 python3 ./test.py -f 2-query/nestedQueryInterval.py -Q 4 -python3 ./test.py -f 2-query/nestedQuery_str.py -Q 4 -python3 ./test.py -f 2-query/nestedQuery_math.py -Q 4 -python3 ./test.py -f 2-query/nestedQuery_time.py -Q 4 +python3 ./test.py -f 2-query/stablity.py -Q 4 +python3 ./test.py -f 2-query/stablity_1.py -Q 4 python3 ./test.py -f 2-query/avg.py -Q 4 python3 ./test.py -f 2-query/elapsed.py -Q 4 python3 ./test.py -f 2-query/csum.py -Q 4 @@ -607,6 +692,7 @@ python3 ./test.py -f 2-query/function_null.py -Q 4 python3 ./test.py -f 2-query/count_partition.py -Q 4 python3 ./test.py -f 2-query/max_partition.py -Q 4 python3 ./test.py -f 2-query/max_min_last_interval.py -Q 4 +python3 ./test.py -f 2-query/last_row_interval.py -Q 4 python3 ./test.py -f 2-query/last_row.py -Q 4 python3 ./test.py -f 2-query/tsbsQuery.py -Q 4 python3 ./test.py -f 2-query/sml.py -Q 4 @@ -617,22 +703,9 @@ python3 ./test.py -f 2-query/insert_select.py -R python3 ./test.py -f 2-query/insert_select.py -Q 2 python3 ./test.py -f 2-query/insert_select.py -Q 3 python3 ./test.py -f 2-query/insert_select.py -Q 4 -python3 ./test.py -f 2-query/insert_null_none.py -python3 ./test.py -f 2-query/insert_null_none.py -R -python3 ./test.py -f 2-query/insert_null_none.py -Q 2 -python3 ./test.py -f 2-query/insert_null_none.py -Q 3 -python3 ./test.py -f 2-query/insert_null_none.py -Q 4 -python3 ./test.py -f 2-query/out_of_order.py python3 ./test.py -f 2-query/out_of_order.py -R -python3 ./test.py -f 2-query/out_of_order.py -Q 2 -python3 ./test.py -f 2-query/out_of_order.py -Q 3 -python3 ./test.py -f 2-query/out_of_order.py -Q 4 -python3 ./test.py -f 2-query/max_min_data.py -python3 ./test.py -f 2-query/max_min_data.py -R -python3 ./test.py -f 2-query/max_min_data.py -Q 2 -python3 ./test.py -f 2-query/max_min_data.py -Q 3 -python3 ./test.py -f 2-query/max_min_data.py -Q 4 python3 ./test.py -f 2-query/blockSMA.py -Q 4 +python3 ./test.py -f 2-query/projectionDesc.py -Q 4 python3 ./test.py -f 2-query/odbc.py python3 ./test.py -f 99-TDcase/TD-21561.py -Q 4 python3 ./test.py -f 99-TDcase/TD-20582.py diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index 9e8d91b6b6fb7d148027e11c3ecdcfc985af7547..e8826584080065245fb5604dfbd07b4e538ca599 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -136,7 +136,33 @@ ELSE () COMMAND cmake -E echo "Copy taosadapter-debug.exe" COMMAND cmake -E copy taosadapter-debug.exe ${CMAKE_BINARY_DIR}/build/bin ) - ELSE (TD_WINDOWS) + ELSEIF (TD_DARWIN) + MESSAGE("Building taosAdapter on MACOS") + INCLUDE(ExternalProject) + ExternalProject_Add(taosadapter + PREFIX "taosadapter" + SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosadapter + BUILD_ALWAYS off + DEPENDS taos + BUILD_IN_SOURCE 1 + CONFIGURE_COMMAND cmake -E echo "taosadapter no need cmake to config" + PATCH_COMMAND + COMMAND git clean -f -d + BUILD_COMMAND + # COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client 
CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}" + COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-s -w -X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}" + COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -o taosadapter-debug -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}" + INSTALL_COMMAND + COMMAND cmake -E echo "Copy taosadapter" + COMMAND cmake -E copy taosadapter ${CMAKE_BINARY_DIR}/build/bin + COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/ + COMMAND cmake -E echo "Copy taosadapter.toml" + COMMAND cmake -E copy ./example/config/taosadapter.toml ${CMAKE_BINARY_DIR}/test/cfg/ + COMMAND cmake -E copy ./taosadapter.service ${CMAKE_BINARY_DIR}/test/cfg/ + COMMAND cmake -E echo "Copy taosadapter-debug" + COMMAND cmake -E copy taosadapter-debug ${CMAKE_BINARY_DIR}/build/bin + ) + ELSE () MESSAGE("Building taosAdapter on non-Windows") INCLUDE(ExternalProject) ExternalProject_Add(taosadapter @@ -164,5 +190,5 @@ ELSE () COMMAND cmake -E echo "Copy taosadapter-debug" COMMAND cmake -E copy taosadapter-debug ${CMAKE_BINARY_DIR}/build/bin ) - ENDIF (TD_WINDOWS) + ENDIF () ENDIF () diff --git a/utils/test/c/sml_test.c b/utils/test/c/sml_test.c index 94619339e9f79920a194c60d8a8386a71c5c2c20..dad30db02a52db6bcc5d4992039b61b8ec939125 100644 --- a/utils/test/c/sml_test.c +++ b/utils/test/c/sml_test.c @@ -1536,9 +1536,9 @@ int main(int argc, char *argv[]) { ASSERT(!ret); ret = sml_ts3116_Test(); ASSERT(!ret); -// ret = sml_ts2385_Test(); // this test case need config sml table name using ./sml_test config_file -// ASSERT(!ret); - ret = sml_ts3303_Test(); // this test case need config sml table name using ./sml_test config_file + ret = sml_ts2385_Test(); // this test case need config sml table name using ./sml_test config_file + ASSERT(!ret); + ret = sml_ts3303_Test(); ASSERT(!ret); // for(int i = 0; i < sizeof(str)/sizeof(str[0]); i++){